Tue, 08 Feb 2011 12:33:19 +0100
7014851: Remove unused parallel compaction code
Summary: Removed the unused parallel compaction code.
Reviewed-by: jcoomes, brutisso
1 /*
2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/javaClasses.hpp"
27 #include "classfile/systemDictionary.hpp"
28 #include "classfile/verifier.hpp"
29 #include "classfile/vmSymbols.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "gc_implementation/shared/markSweep.inline.hpp"
32 #include "gc_interface/collectedHeap.inline.hpp"
33 #include "interpreter/oopMapCache.hpp"
34 #include "interpreter/rewriter.hpp"
35 #include "jvmtifiles/jvmti.h"
36 #include "memory/genOopClosures.inline.hpp"
37 #include "memory/oopFactory.hpp"
38 #include "memory/permGen.hpp"
39 #include "oops/instanceKlass.hpp"
40 #include "oops/instanceOop.hpp"
41 #include "oops/methodOop.hpp"
42 #include "oops/objArrayKlassKlass.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "oops/symbol.hpp"
45 #include "prims/jvmtiExport.hpp"
46 #include "prims/jvmtiRedefineClassesTrace.hpp"
47 #include "runtime/fieldDescriptor.hpp"
48 #include "runtime/handles.inline.hpp"
49 #include "runtime/javaCalls.hpp"
50 #include "runtime/mutexLocker.hpp"
51 #include "services/threadService.hpp"
52 #include "utilities/dtrace.hpp"
53 #ifdef TARGET_OS_FAMILY_linux
54 # include "thread_linux.inline.hpp"
55 #endif
56 #ifdef TARGET_OS_FAMILY_solaris
57 # include "thread_solaris.inline.hpp"
58 #endif
59 #ifdef TARGET_OS_FAMILY_windows
60 # include "thread_windows.inline.hpp"
61 #endif
62 #ifndef SERIALGC
63 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
64 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
65 #include "gc_implementation/g1/g1RemSet.inline.hpp"
66 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
67 #include "gc_implementation/parNew/parOopClosures.inline.hpp"
68 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
69 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
70 #include "oops/oop.pcgc.inline.hpp"
71 #endif
72 #ifdef COMPILER1
73 #include "c1/c1_Compiler.hpp"
74 #endif
76 #ifdef DTRACE_ENABLED
78 HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
79 char*, intptr_t, oop, intptr_t);
80 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
81 char*, intptr_t, oop, intptr_t, int);
82 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
83 char*, intptr_t, oop, intptr_t, int);
84 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
85 char*, intptr_t, oop, intptr_t, int);
86 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
87 char*, intptr_t, oop, intptr_t, int);
88 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
89 char*, intptr_t, oop, intptr_t, int);
90 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
91 char*, intptr_t, oop, intptr_t, int);
92 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
93 char*, intptr_t, oop, intptr_t, int);
95 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type) \
96 { \
97 char* data = NULL; \
98 int len = 0; \
99 Symbol* name = (clss)->name(); \
100 if (name != NULL) { \
101 data = (char*)name->bytes(); \
102 len = name->utf8_length(); \
103 } \
104 HS_DTRACE_PROBE4(hotspot, class__initialization__##type, \
105 data, len, (clss)->class_loader(), thread_type); \
106 }
108 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
109 { \
110 char* data = NULL; \
111 int len = 0; \
112 Symbol* name = (clss)->name(); \
113 if (name != NULL) { \
114 data = (char*)name->bytes(); \
115 len = name->utf8_length(); \
116 } \
117 HS_DTRACE_PROBE5(hotspot, class__initialization__##type, \
118 data, len, (clss)->class_loader(), thread_type, wait); \
119 }
121 #else // ndef DTRACE_ENABLED
123 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
124 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)
126 #endif // ndef DTRACE_ENABLED
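// The probe macros above are used at each class-initialization state
// transition later in this file, for example (from initialize_impl below):
//
//   DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);
//   DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1, wait);
//
// When DTRACE_ENABLED is not defined, both macros expand to nothing and the
// call sites compile away.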
128 bool instanceKlass::should_be_initialized() const {
129 return !is_initialized();
130 }
132 klassVtable* instanceKlass::vtable() const {
133 return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
134 }
136 klassItable* instanceKlass::itable() const {
137 return new klassItable(as_klassOop());
138 }
140 void instanceKlass::eager_initialize(Thread *thread) {
141 if (!EagerInitialization) return;
143 if (this->is_not_initialized()) {
144 // abort if the class has a class initializer
145 if (this->class_initializer() != NULL) return;
147 // abort if it is java.lang.Object (initialization is handled in genesis)
148 klassOop super = this->super();
149 if (super == NULL) return;
151 // abort if the super class should be initialized
152 if (!instanceKlass::cast(super)->is_initialized()) return;
154 // call body to expose the this pointer
155 instanceKlassHandle this_oop(thread, this->as_klassOop());
156 eager_initialize_impl(this_oop);
157 }
158 }
161 void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
162 EXCEPTION_MARK;
163 ObjectLocker ol(this_oop, THREAD);
165 // abort if someone beat us to the initialization
166 if (!this_oop->is_not_initialized()) return; // note: not equivalent to is_initialized()
168 ClassState old_state = this_oop->_init_state;
169 link_class_impl(this_oop, true, THREAD);
170 if (HAS_PENDING_EXCEPTION) {
171 CLEAR_PENDING_EXCEPTION;
172 // Abort if linking the class throws an exception.
174 // Use a test to avoid redundantly resetting the state if there's
175 // no change. Set_init_state() asserts that state changes make
176 // progress, whereas here we might just be spinning in place.
177 if( old_state != this_oop->_init_state )
178 this_oop->set_init_state (old_state);
179 } else {
180 // linking successful, mark class as initialized
181 this_oop->set_init_state (fully_initialized);
182 // trace
183 if (TraceClassInitialization) {
184 ResourceMark rm(THREAD);
185 tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
186 }
187 }
188 }
191 // See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
192 // process. The step comments refer to the procedure described in that section.
193 // Note: implementation moved to static method to expose the this pointer.
194 void instanceKlass::initialize(TRAPS) {
195 if (this->should_be_initialized()) {
196 HandleMark hm(THREAD);
197 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
198 initialize_impl(this_oop, CHECK);
199 // Note: at this point the class may be initialized
200 // OR it may be in the state of being initialized
201 // in case of recursive initialization!
202 } else {
203 assert(is_initialized(), "sanity check");
204 }
205 }
208 bool instanceKlass::verify_code(
209 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
210 // 1) Verify the bytecodes
211 Verifier::Mode mode =
212 throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
213 return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
214 }
217 // Used exclusively by the shared spaces dump mechanism to prevent
218 // classes mapped into the shared regions in new VMs from appearing linked.
220 void instanceKlass::unlink_class() {
221 assert(is_linked(), "must be linked");
222 _init_state = loaded;
223 }
225 void instanceKlass::link_class(TRAPS) {
226 assert(is_loaded(), "must be loaded");
227 if (!is_linked()) {
228 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
229 link_class_impl(this_oop, true, CHECK);
230 }
231 }
233 // Called to verify that a class can link during initialization, without
234 // throwing a VerifyError.
235 bool instanceKlass::link_class_or_fail(TRAPS) {
236 assert(is_loaded(), "must be loaded");
237 if (!is_linked()) {
238 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
239 link_class_impl(this_oop, false, CHECK_false);
240 }
241 return is_linked();
242 }
244 bool instanceKlass::link_class_impl(
245 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
246 // check for error state
247 if (this_oop->is_in_error_state()) {
248 ResourceMark rm(THREAD);
249 THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
250 this_oop->external_name(), false);
251 }
252 // return if already verified
253 if (this_oop->is_linked()) {
254 return true;
255 }
257 // Timing
258 // timer handles recursion
259 assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
260 JavaThread* jt = (JavaThread*)THREAD;
262 // link super class before linking this class
263 instanceKlassHandle super(THREAD, this_oop->super());
264 if (super.not_null()) {
265 if (super->is_interface()) { // check if super class is an interface
266 ResourceMark rm(THREAD);
267 Exceptions::fthrow(
268 THREAD_AND_LOCATION,
269 vmSymbols::java_lang_IncompatibleClassChangeError(),
270 "class %s has interface %s as super class",
271 this_oop->external_name(),
272 super->external_name()
273 );
274 return false;
275 }
277 link_class_impl(super, throw_verifyerror, CHECK_false);
278 }
280 // link all interfaces implemented by this class before linking this class
281 objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
282 int num_interfaces = interfaces->length();
283 for (int index = 0; index < num_interfaces; index++) {
284 HandleMark hm(THREAD);
285 instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
286 link_class_impl(ih, throw_verifyerror, CHECK_false);
287 }
289 // in case the class is linked in the process of linking its superclasses
290 if (this_oop->is_linked()) {
291 return true;
292 }
294 // trace only the link time for this klass that includes
295 // the verification time
296 PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
297 ClassLoader::perf_class_link_selftime(),
298 ClassLoader::perf_classes_linked(),
299 jt->get_thread_stat()->perf_recursion_counts_addr(),
300 jt->get_thread_stat()->perf_timers_addr(),
301 PerfClassTraceTime::CLASS_LINK);
303 // verification & rewriting
304 {
305 ObjectLocker ol(this_oop, THREAD);
306 // rewritten will have been set if loader constraint error found
307 // on an earlier link attempt
308 // don't verify or rewrite if already rewritten
309 if (!this_oop->is_linked()) {
310 if (!this_oop->is_rewritten()) {
311 {
312 // Timer includes any side effects of class verification (resolution,
313 // etc), but not recursive entry into verify_code().
314 PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
315 ClassLoader::perf_class_verify_selftime(),
316 ClassLoader::perf_classes_verified(),
317 jt->get_thread_stat()->perf_recursion_counts_addr(),
318 jt->get_thread_stat()->perf_timers_addr(),
319 PerfClassTraceTime::CLASS_VERIFY);
320 bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
321 if (!verify_ok) {
322 return false;
323 }
324 }
326 // Just in case a side-effect of verify linked this class already
327 // (which can sometimes happen since the verifier loads classes
328 // using custom class loaders, which are free to initialize things)
329 if (this_oop->is_linked()) {
330 return true;
331 }
333 // also sets rewritten
334 this_oop->rewrite_class(CHECK_false);
335 }
337 // Initialize the vtable and interface table after
338 // methods have been rewritten since rewrite may
339 // fabricate new methodOops.
340 // also does loader constraint checking
341 if (!this_oop()->is_shared()) {
342 ResourceMark rm(THREAD);
343 this_oop->vtable()->initialize_vtable(true, CHECK_false);
344 this_oop->itable()->initialize_itable(true, CHECK_false);
345 }
346 #ifdef ASSERT
347 else {
348 ResourceMark rm(THREAD);
349 this_oop->vtable()->verify(tty, true);
350 // In case itable verification is ever added.
351 // this_oop->itable()->verify(tty, true);
352 }
353 #endif
354 this_oop->set_init_state(linked);
355 if (JvmtiExport::should_post_class_prepare()) {
356 Thread *thread = THREAD;
357 assert(thread->is_Java_thread(), "thread->is_Java_thread()");
358 JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
359 }
360 }
361 }
362 return true;
363 }
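// A rough outline of link_class_impl above, as a reading aid:
//
//   1. fail with NoClassDefFoundError if the class is already in an error state
//   2. return true immediately if the class is already linked
//   3. recursively link the super class and all local interfaces
//   4. under the ObjectLocker:
//        - verify and rewrite the bytecodes (unless already rewritten)
//        - initialize the vtable and itable for non-shared classes
//        - set the init state to linked and post JVMTI CLASS_PREPARE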
366 // Rewrite the byte codes of all of the methods of a class.
367 // Three cases:
368 // During the link of a newly loaded class.
369 // During the preloading of classes to be written to the shared spaces.
370 // - Rewrite the methods and update the method entry points.
371 //
372 // During the link of a class in the shared spaces.
373 // - The methods were already rewritten, update the method entry points.
374 //
375 // The rewriter must be called exactly once. Rewriting must happen after
376 // verification but before the first method of the class is executed.
378 void instanceKlass::rewrite_class(TRAPS) {
379 assert(is_loaded(), "must be loaded");
380 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
381 if (this_oop->is_rewritten()) {
382 assert(this_oop()->is_shared(), "rewriting an unshared class?");
383 return;
384 }
385 Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
386 this_oop->set_rewritten();
387 }
390 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
391 // Make sure klass is linked (verified) before initialization
392 // A class could already be verified, since it has been reflected upon.
393 this_oop->link_class(CHECK);
395 DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);
397 bool wait = false;
399 // refer to the JVM book page 47 for description of steps
400 // Step 1
401 { ObjectLocker ol(this_oop, THREAD);
403 Thread *self = THREAD; // THREAD is the current thread
405 // Step 2
406 // If we were to use wait() instead of waitUninterruptibly() then
407 // we might end up throwing IE from link/symbol resolution sites
408 // that aren't expected to throw. This would wreak havoc. See 6320309.
409 while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
410 wait = true;
411 ol.waitUninterruptibly(CHECK);
412 }
414 // Step 3
415 if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
416 DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1,wait);
417 return;
418 }
420 // Step 4
421 if (this_oop->is_initialized()) {
422 DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1,wait);
423 return;
424 }
426 // Step 5
427 if (this_oop->is_in_error_state()) {
428 DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1,wait);
429 ResourceMark rm(THREAD);
430 const char* desc = "Could not initialize class ";
431 const char* className = this_oop->external_name();
432 size_t msglen = strlen(desc) + strlen(className) + 1;
433 char* message = NEW_RESOURCE_ARRAY(char, msglen);
434 if (NULL == message) {
435 // Out of memory: can't create detailed error message
436 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
437 } else {
438 jio_snprintf(message, msglen, "%s%s", desc, className);
439 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
440 }
441 }
443 // Step 6
444 this_oop->set_init_state(being_initialized);
445 this_oop->set_init_thread(self);
446 }
448 // Step 7
449 klassOop super_klass = this_oop->super();
450 if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
451 Klass::cast(super_klass)->initialize(THREAD);
453 if (HAS_PENDING_EXCEPTION) {
454 Handle e(THREAD, PENDING_EXCEPTION);
455 CLEAR_PENDING_EXCEPTION;
456 {
457 EXCEPTION_MARK;
458 this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
459 CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, superclass initialization error is thrown below
460 }
461 DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1,wait);
462 THROW_OOP(e());
463 }
464 }
466 // Step 8
467 {
468 assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
469 JavaThread* jt = (JavaThread*)THREAD;
470 DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1,wait);
471 // Timer includes any side effects of class initialization (resolution,
472 // etc), but not recursive entry into call_class_initializer().
473 PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
474 ClassLoader::perf_class_init_selftime(),
475 ClassLoader::perf_classes_inited(),
476 jt->get_thread_stat()->perf_recursion_counts_addr(),
477 jt->get_thread_stat()->perf_timers_addr(),
478 PerfClassTraceTime::CLASS_CLINIT);
479 this_oop->call_class_initializer(THREAD);
480 }
482 // Step 9
483 if (!HAS_PENDING_EXCEPTION) {
484 this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
485 { ResourceMark rm(THREAD);
486 debug_only(this_oop->vtable()->verify(tty, true);)
487 }
488 }
489 else {
490 // Step 10 and 11
491 Handle e(THREAD, PENDING_EXCEPTION);
492 CLEAR_PENDING_EXCEPTION;
493 {
494 EXCEPTION_MARK;
495 this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
496 CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, class initialization error is thrown below
497 }
498 DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1,wait);
499 if (e->is_a(SystemDictionary::Error_klass())) {
500 THROW_OOP(e());
501 } else {
502 JavaCallArguments args(e);
503 THROW_ARG(vmSymbols::java_lang_ExceptionInInitializerError(),
504 vmSymbols::throwable_void_signature(),
505 &args);
506 }
507 }
508 DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1,wait);
509 }
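// Summary of the numbered steps in initialize_impl above, following the
// class initialization procedure referenced at the top of this file:
//
//   Step 1       grab the class's init lock (ObjectLocker)
//   Step 2       wait while another thread is initializing this class
//   Step 3       return if this thread is already initializing it (recursion)
//   Step 4       return if the class is already fully initialized
//   Step 5       throw NoClassDefFoundError if the class is in an error state
//   Step 6       mark the class as being_initialized by this thread
//   Step 7       initialize the superclass first
//   Step 8       run the class initializer (<clinit>)
//   Step 9       on success, set fully_initialized and notify waiting threads
//   Steps 10/11  on failure, set initialization_error and rethrow the exception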
512 // Note: implementation moved to static method to expose the this pointer.
513 void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
514 instanceKlassHandle kh(THREAD, this->as_klassOop());
515 set_initialization_state_and_notify_impl(kh, state, CHECK);
516 }
518 void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
519 ObjectLocker ol(this_oop, THREAD);
520 this_oop->set_init_state(state);
521 ol.notify_all(CHECK);
522 }
524 void instanceKlass::add_implementor(klassOop k) {
525 assert(Compile_lock->owned_by_self(), "");
526 // Filter out my subinterfaces.
527 // (Note: Interfaces are never on the subklass list.)
528 if (instanceKlass::cast(k)->is_interface()) return;
530 // Filter out subclasses whose supers already implement me.
531 // (Note: CHA must walk subclasses of direct implementors
532 // in order to locate indirect implementors.)
533 klassOop sk = instanceKlass::cast(k)->super();
534 if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
535 // We only need to check one immediate superclass, since the
536 // implements_interface query looks at transitive_interfaces.
537 // Any supers of the super have the same (or fewer) transitive_interfaces.
538 return;
540 // Update number of implementors
541 int i = _nof_implementors++;
543 // Record this implementor, if there are not too many already
544 if (i < implementors_limit) {
545 assert(_implementors[i] == NULL, "should be exactly one implementor");
546 oop_store_without_check((oop*)&_implementors[i], k);
547 } else if (i == implementors_limit) {
548 // clear out the list on first overflow
549 for (int i2 = 0; i2 < implementors_limit; i2++)
550 oop_store_without_check((oop*)&_implementors[i2], NULL);
551 }
553 // The implementor also implements the transitive_interfaces
554 for (int index = 0; index < local_interfaces()->length(); index++) {
555 instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
556 }
557 }
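// Note on the overflow handling above: at most implementors_limit implementors
// are recorded individually; on the first overflow the recorded entries are
// cleared, and from then on only _nof_implementors keeps counting.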
559 void instanceKlass::init_implementor() {
560 for (int i = 0; i < implementors_limit; i++)
561 oop_store_without_check((oop*)&_implementors[i], NULL);
562 _nof_implementors = 0;
563 }
566 void instanceKlass::process_interfaces(Thread *thread) {
567 // link this class into the implementors list of every interface it implements
568 KlassHandle this_as_oop (thread, this->as_klassOop());
569 for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
570 assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
571 instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
572 assert(interf->is_interface(), "expected interface");
573 interf->add_implementor(this_as_oop());
574 }
575 }
577 bool instanceKlass::can_be_primary_super_slow() const {
578 if (is_interface())
579 return false;
580 else
581 return Klass::can_be_primary_super_slow();
582 }
584 objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
585 // The secondaries are the implemented interfaces.
586 instanceKlass* ik = instanceKlass::cast(as_klassOop());
587 objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
588 int num_secondaries = num_extra_slots + interfaces->length();
589 if (num_secondaries == 0) {
590 return Universe::the_empty_system_obj_array();
591 } else if (num_extra_slots == 0) {
592 return interfaces();
593 } else {
594 // a mix of both
595 objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
596 for (int i = 0; i < interfaces->length(); i++) {
597 secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
598 }
599 return secondaries;
600 }
601 }
603 bool instanceKlass::compute_is_subtype_of(klassOop k) {
604 if (Klass::cast(k)->is_interface()) {
605 return implements_interface(k);
606 } else {
607 return Klass::compute_is_subtype_of(k);
608 }
609 }
611 bool instanceKlass::implements_interface(klassOop k) const {
612 if (as_klassOop() == k) return true;
613 assert(Klass::cast(k)->is_interface(), "should be an interface class");
614 for (int i = 0; i < transitive_interfaces()->length(); i++) {
615 if (transitive_interfaces()->obj_at(i) == k) {
616 return true;
617 }
618 }
619 return false;
620 }
622 objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
623 if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
624 if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
625 report_java_out_of_memory("Requested array size exceeds VM limit");
626 THROW_OOP_0(Universe::out_of_memory_error_array_size());
627 }
628 int size = objArrayOopDesc::object_size(length);
629 klassOop ak = array_klass(n, CHECK_NULL);
630 KlassHandle h_ak (THREAD, ak);
631 objArrayOop o =
632 (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
633 return o;
634 }
636 instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
637 if (TraceFinalizerRegistration) {
638 tty->print("Registered ");
639 i->print_value_on(tty);
640 tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
641 }
642 instanceHandle h_i(THREAD, i);
643 // Pass the handle as argument; JavaCalls::call expects oops as jobjects
644 JavaValue result(T_VOID);
645 JavaCallArguments args(h_i);
646 methodHandle mh (THREAD, Universe::finalizer_register_method());
647 JavaCalls::call(&result, mh, &args, CHECK_NULL);
648 return h_i();
649 }
651 instanceOop instanceKlass::allocate_instance(TRAPS) {
652 bool has_finalizer_flag = has_finalizer(); // Query before possible GC
653 int size = size_helper(); // Query before forming handle.
655 KlassHandle h_k(THREAD, as_klassOop());
657 instanceOop i;
659 i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
660 if (has_finalizer_flag && !RegisterFinalizersAtInit) {
661 i = register_finalizer(i, CHECK_NULL);
662 }
663 return i;
664 }
666 instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
667 // Finalizer registration occurs in the Object.<init> constructor
668 // and constructors normally aren't run when allocating perm
669 // instances so simply disallow finalizable perm objects. This can
670 // be relaxed if a need for it is found.
671 assert(!has_finalizer(), "perm objects not allowed to have finalizers");
672 int size = size_helper(); // Query before forming handle.
673 KlassHandle h_k(THREAD, as_klassOop());
674 instanceOop i = (instanceOop)
675 CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
676 return i;
677 }
679 void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
680 if (is_interface() || is_abstract()) {
681 ResourceMark rm(THREAD);
682 THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
683 : vmSymbols::java_lang_InstantiationException(), external_name());
684 }
685 if (as_klassOop() == SystemDictionary::Class_klass()) {
686 ResourceMark rm(THREAD);
687 THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
688 : vmSymbols::java_lang_IllegalAccessException(), external_name());
689 }
690 }
692 klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
693 instanceKlassHandle this_oop(THREAD, as_klassOop());
694 return array_klass_impl(this_oop, or_null, n, THREAD);
695 }
697 klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
698 if (this_oop->array_klasses() == NULL) {
699 if (or_null) return NULL;
701 ResourceMark rm;
702 JavaThread *jt = (JavaThread *)THREAD;
703 {
704 // Atomic creation of array_klasses
705 MutexLocker mc(Compile_lock, THREAD); // for vtables
706 MutexLocker ma(MultiArray_lock, THREAD);
708 // Check if update has already taken place
709 if (this_oop->array_klasses() == NULL) {
710 objArrayKlassKlass* oakk =
711 (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();
713 klassOop k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
714 this_oop->set_array_klasses(k);
715 }
716 }
717 }
718 // array_klasses() will always be set at this point
719 objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
720 if (or_null) {
721 return oak->array_klass_or_null(n);
722 }
723 return oak->array_klass(n, CHECK_NULL);
724 }
726 klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
727 return array_klass_impl(or_null, 1, THREAD);
728 }
730 void instanceKlass::call_class_initializer(TRAPS) {
731 instanceKlassHandle ik (THREAD, as_klassOop());
732 call_class_initializer_impl(ik, THREAD);
733 }
735 static int call_class_initializer_impl_counter = 0; // for debugging
737 methodOop instanceKlass::class_initializer() {
738 return find_method(vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
739 }
741 void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
742 methodHandle h_method(THREAD, this_oop->class_initializer());
743 assert(!this_oop->is_initialized(), "we cannot initialize twice");
744 if (TraceClassInitialization) {
745 tty->print("%d Initializing ", call_class_initializer_impl_counter++);
746 this_oop->name()->print_value();
747 tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
748 }
749 if (h_method() != NULL) {
750 JavaCallArguments args; // No arguments
751 JavaValue result(T_VOID);
752 JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
753 }
754 }
757 void instanceKlass::mask_for(methodHandle method, int bci,
758 InterpreterOopMap* entry_for) {
759 // Dirty read, then double-check under a lock.
760 if (_oop_map_cache == NULL) {
761 // Otherwise, allocate a new one.
762 MutexLocker x(OopMapCacheAlloc_lock);
763 // First time use. Allocate a cache in C heap
764 if (_oop_map_cache == NULL) {
765 _oop_map_cache = new OopMapCache();
766 }
767 }
768 // _oop_map_cache is constant after init; lookup below does its own locking.
769 _oop_map_cache->lookup(method, bci, entry_for);
770 }
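// The code above is the same dirty-read / double-check-under-lock pattern
// used for the jmethodID cache later in this file: read _oop_map_cache
// without a lock, and only take OopMapCacheAlloc_lock (re-checking the field)
// when the cache still needs to be allocated.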
773 bool instanceKlass::find_local_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
774 const int n = fields()->length();
775 for (int i = 0; i < n; i += next_offset ) {
776 int name_index = fields()->ushort_at(i + name_index_offset);
777 int sig_index = fields()->ushort_at(i + signature_index_offset);
778 Symbol* f_name = constants()->symbol_at(name_index);
779 Symbol* f_sig = constants()->symbol_at(sig_index);
780 if (f_name == name && f_sig == sig) {
781 fd->initialize(as_klassOop(), i);
782 return true;
783 }
784 }
785 return false;
786 }
789 void instanceKlass::shared_symbols_iterate(SymbolClosure* closure) {
790 Klass::shared_symbols_iterate(closure);
791 closure->do_symbol(&_generic_signature);
792 closure->do_symbol(&_source_file_name);
793 closure->do_symbol(&_source_debug_extension);
795 const int n = fields()->length();
796 for (int i = 0; i < n; i += next_offset ) {
797 int name_index = fields()->ushort_at(i + name_index_offset);
798 closure->do_symbol(constants()->symbol_at_addr(name_index));
799 int sig_index = fields()->ushort_at(i + signature_index_offset);
800 closure->do_symbol(constants()->symbol_at_addr(sig_index));
801 }
802 }
805 klassOop instanceKlass::find_interface_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
806 const int n = local_interfaces()->length();
807 for (int i = 0; i < n; i++) {
808 klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
809 assert(Klass::cast(intf1)->is_interface(), "just checking type");
810 // search for field in current interface
811 if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
812 assert(fd->is_static(), "interface field must be static");
813 return intf1;
814 }
815 // search for field in direct superinterfaces
816 klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
817 if (intf2 != NULL) return intf2;
818 }
819 // otherwise field lookup fails
820 return NULL;
821 }
824 klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
825 // search order according to newest JVM spec (5.4.3.2, p.167).
826 // 1) search for field in current klass
827 if (find_local_field(name, sig, fd)) {
828 return as_klassOop();
829 }
830 // 2) search for field recursively in direct superinterfaces
831 { klassOop intf = find_interface_field(name, sig, fd);
832 if (intf != NULL) return intf;
833 }
834 // 3) apply field lookup recursively if superclass exists
835 { klassOop supr = super();
836 if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
837 }
838 // 4) otherwise field lookup fails
839 return NULL;
840 }
843 klassOop instanceKlass::find_field(Symbol* name, Symbol* sig, bool is_static, fieldDescriptor* fd) const {
844 // search order according to newest JVM spec (5.4.3.2, p.167).
845 // 1) search for field in current klass
846 if (find_local_field(name, sig, fd)) {
847 if (fd->is_static() == is_static) return as_klassOop();
848 }
849 // 2) search for field recursively in direct superinterfaces
850 if (is_static) {
851 klassOop intf = find_interface_field(name, sig, fd);
852 if (intf != NULL) return intf;
853 }
854 // 3) apply field lookup recursively if superclass exists
855 { klassOop supr = super();
856 if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
857 }
858 // 4) otherwise field lookup fails
859 return NULL;
860 }
863 bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
864 int length = fields()->length();
865 for (int i = 0; i < length; i += next_offset) {
866 if (offset_from_fields( i ) == offset) {
867 fd->initialize(as_klassOop(), i);
868 if (fd->is_static() == is_static) return true;
869 }
870 }
871 return false;
872 }
875 bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
876 klassOop klass = as_klassOop();
877 while (klass != NULL) {
878 if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
879 return true;
880 }
881 klass = Klass::cast(klass)->super();
882 }
883 return false;
884 }
887 void instanceKlass::methods_do(void f(methodOop method)) {
888 int len = methods()->length();
889 for (int index = 0; index < len; index++) {
890 methodOop m = methodOop(methods()->obj_at(index));
891 assert(m->is_method(), "must be method");
892 f(m);
893 }
894 }
896 void instanceKlass::do_local_static_fields(FieldClosure* cl) {
897 fieldDescriptor fd;
898 int length = fields()->length();
899 for (int i = 0; i < length; i += next_offset) {
900 fd.initialize(as_klassOop(), i);
901 if (fd.is_static()) cl->do_field(&fd);
902 }
903 }
906 void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
907 instanceKlassHandle h_this(THREAD, as_klassOop());
908 do_local_static_fields_impl(h_this, f, CHECK);
909 }
912 void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
913 fieldDescriptor fd;
914 int length = this_oop->fields()->length();
915 for (int i = 0; i < length; i += next_offset) {
916 fd.initialize(this_oop(), i);
917 if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
918 }
919 }
922 static int compare_fields_by_offset(int* a, int* b) {
923 return a[0] - b[0];
924 }
926 void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
927 instanceKlass* super = superklass();
928 if (super != NULL) {
929 super->do_nonstatic_fields(cl);
930 }
931 fieldDescriptor fd;
932 int length = fields()->length();
933 // In DebugInfo nonstatic fields are sorted by offset.
934 int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
935 int j = 0;
936 for (int i = 0; i < length; i += next_offset) {
937 fd.initialize(as_klassOop(), i);
938 if (!fd.is_static()) {
939 fields_sorted[j + 0] = fd.offset();
940 fields_sorted[j + 1] = i;
941 j += 2;
942 }
943 }
944 if (j > 0) {
945 length = j;
946 // _sort_Fn is defined in growableArray.hpp.
947 qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
948 for (int i = 0; i < length; i += 2) {
949 fd.initialize(as_klassOop(), fields_sorted[i + 1]);
950 assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
951 cl->do_field(&fd);
952 }
953 }
954 FREE_C_HEAP_ARRAY(int, fields_sorted);
955 }
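// For reference, the temporary fields_sorted array built above holds
// (offset, field index) pairs as two consecutive ints; qsort with
// compare_fields_by_offset orders the pairs by offset, and the second int of
// each pair is then used to re-initialize the fieldDescriptor in offset order.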
958 void instanceKlass::array_klasses_do(void f(klassOop k)) {
959 if (array_klasses() != NULL)
960 arrayKlass::cast(array_klasses())->array_klasses_do(f);
961 }
964 void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
965 f(as_klassOop());
966 array_klasses_do(f);
967 }
969 #ifdef ASSERT
970 static int linear_search(objArrayOop methods, Symbol* name, Symbol* signature) {
971 int len = methods->length();
972 for (int index = 0; index < len; index++) {
973 methodOop m = (methodOop)(methods->obj_at(index));
974 assert(m->is_method(), "must be method");
975 if (m->signature() == signature && m->name() == name) {
976 return index;
977 }
978 }
979 return -1;
980 }
981 #endif
983 methodOop instanceKlass::find_method(Symbol* name, Symbol* signature) const {
984 return instanceKlass::find_method(methods(), name, signature);
985 }
987 methodOop instanceKlass::find_method(objArrayOop methods, Symbol* name, Symbol* signature) {
988 int len = methods->length();
989 // methods are sorted, so do binary search
990 int l = 0;
991 int h = len - 1;
992 while (l <= h) {
993 int mid = (l + h) >> 1;
994 methodOop m = (methodOop)methods->obj_at(mid);
995 assert(m->is_method(), "must be method");
996 int res = m->name()->fast_compare(name);
997 if (res == 0) {
998 // found matching name; do linear search to find matching signature
999 // first, quick check for common case
1000 if (m->signature() == signature) return m;
1001 // search downwards through overloaded methods
1002 int i;
1003 for (i = mid - 1; i >= l; i--) {
1004 methodOop m = (methodOop)methods->obj_at(i);
1005 assert(m->is_method(), "must be method");
1006 if (m->name() != name) break;
1007 if (m->signature() == signature) return m;
1008 }
1009 // search upwards
1010 for (i = mid + 1; i <= h; i++) {
1011 methodOop m = (methodOop)methods->obj_at(i);
1012 assert(m->is_method(), "must be method");
1013 if (m->name() != name) break;
1014 if (m->signature() == signature) return m;
1015 }
1016 // not found
1017 #ifdef ASSERT
1018 int index = linear_search(methods, name, signature);
1019 assert(index == -1, err_msg("binary search should have found entry %d", index));
1020 #endif
1021 return NULL;
1022 } else if (res < 0) {
1023 l = mid + 1;
1024 } else {
1025 h = mid - 1;
1026 }
1027 }
1028 #ifdef ASSERT
1029 int index = linear_search(methods, name, signature);
1030 assert(index == -1, err_msg("binary search should have found entry %d", index));
1031 #endif
1032 return NULL;
1033 }
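// Note on the lookup above: methods() is sorted by name only, so overloaded
// methods sharing a name are adjacent. The binary search locates any method
// with a matching name, and the two linear scans walk the neighboring entries
// in both directions until the signature matches or the name changes.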
1035 methodOop instanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
1036 klassOop klass = as_klassOop();
1037 while (klass != NULL) {
1038 methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
1039 if (method != NULL) return method;
1040 klass = instanceKlass::cast(klass)->super();
1041 }
1042 return NULL;
1043 }
1045 // lookup a method in all the interfaces that this class implements
1046 methodOop instanceKlass::lookup_method_in_all_interfaces(Symbol* name,
1047 Symbol* signature) const {
1048 objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
1049 int num_ifs = all_ifs->length();
1050 instanceKlass *ik = NULL;
1051 for (int i = 0; i < num_ifs; i++) {
1052 ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
1053 methodOop m = ik->lookup_method(name, signature);
1054 if (m != NULL) {
1055 return m;
1056 }
1057 }
1058 return NULL;
1059 }
1061 /* jni_id_for_impl for jfieldIds only */
1062 JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
1063 MutexLocker ml(JfieldIdCreation_lock);
1064 // Retry lookup after we got the lock
1065 JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
1066 if (probe == NULL) {
1067 // Slow case, allocate new static field identifier
1068 probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
1069 this_oop->set_jni_ids(probe);
1070 }
1071 return probe;
1072 }
1075 /* jni_id_for for jfieldIds only */
1076 JNIid* instanceKlass::jni_id_for(int offset) {
1077 JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
1078 if (probe == NULL) {
1079 probe = jni_id_for_impl(this->as_klassOop(), offset);
1080 }
1081 return probe;
1082 }
1085 // Lookup or create a jmethodID.
1086 // This code is called by the VMThread and JavaThreads so the
1087 // locking has to be done very carefully to avoid deadlocks
1088 // and/or other cache consistency problems.
1089 //
1090 jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
1091 size_t idnum = (size_t)method_h->method_idnum();
1092 jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1093 size_t length = 0;
1094 jmethodID id = NULL;
1096 // We use a double-check locking idiom here because this cache is
1097 // performance sensitive. In the normal system, this cache only
1098 // transitions from NULL to non-NULL which is safe because we use
1099 // release_set_methods_jmethod_ids() to advertise the new cache.
1100 // A partially constructed cache should never be seen by a racing
1101 // thread. We also use release_store_ptr() to save a new jmethodID
1102 // in the cache so a partially constructed jmethodID should never be
1103 // seen either. Cache reads of existing jmethodIDs proceed without a
1104 // lock, but cache writes of a new jmethodID require uniqueness and
1105 // creation of the cache itself requires no leaks so a lock is
1106 // generally acquired in those two cases.
1107 //
1108 // If the RedefineClasses() API has been used, then this cache can
1109 // grow and we'll have transitions from non-NULL to bigger non-NULL.
1110 // Cache creation requires no leaks and we require safety between all
1111 // cache accesses and freeing of the old cache so a lock is generally
1112 // acquired when the RedefineClasses() API has been used.
1114 if (jmeths != NULL) {
1115 // the cache already exists
1116 if (!ik_h->idnum_can_increment()) {
1117 // the cache can't grow so we can just get the current values
1118 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1119 } else {
1120 // cache can grow so we have to be more careful
1121 if (Threads::number_of_threads() == 0 ||
1122 SafepointSynchronize::is_at_safepoint()) {
1123 // we're single threaded or at a safepoint - no locking needed
1124 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1125 } else {
1126 MutexLocker ml(JmethodIdCreation_lock);
1127 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1128 }
1129 }
1130 }
1131 // implied else:
1132 // we need to allocate a cache so default length and id values are good
1134 if (jmeths == NULL || // no cache yet
1135 length <= idnum || // cache is too short
1136 id == NULL) { // cache doesn't contain entry
1138 // This function can be called by the VMThread so we have to do all
1139 // things that might block on a safepoint before grabbing the lock.
1140 // Otherwise, we can deadlock with the VMThread or have a cache
1141 // consistency issue. These vars keep track of what we might have
1142 // to free after the lock is dropped.
1143 jmethodID to_dealloc_id = NULL;
1144 jmethodID* to_dealloc_jmeths = NULL;
1146 // new_jmeths may not be allocated, and may go unused even if it is
1147 jmethodID* new_jmeths = NULL;
1148 if (length <= idnum) {
1149 // allocate a new cache that might be used
1150 size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
1151 new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
1152 memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
1153 // cache size is stored in element[0], other elements offset by one
1154 new_jmeths[0] = (jmethodID)size;
1155 }
1157 // allocate a new jmethodID that might be used
1158 jmethodID new_id = NULL;
1159 if (method_h->is_old() && !method_h->is_obsolete()) {
1160 // The method passed in is old (but not obsolete), we need to use the current version
1161 methodOop current_method = ik_h->method_with_idnum((int)idnum);
1162 assert(current_method != NULL, "old but not obsolete, so should exist");
1163 methodHandle current_method_h(current_method == NULL? method_h() : current_method);
1164 new_id = JNIHandles::make_jmethod_id(current_method_h);
1165 } else {
1166 // It is the current version of the method or an obsolete method,
1167 // use the version passed in
1168 new_id = JNIHandles::make_jmethod_id(method_h);
1169 }
1171 if (Threads::number_of_threads() == 0 ||
1172 SafepointSynchronize::is_at_safepoint()) {
1173 // we're single threaded or at a safepoint - no locking needed
1174 id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1175 &to_dealloc_id, &to_dealloc_jmeths);
1176 } else {
1177 MutexLocker ml(JmethodIdCreation_lock);
1178 id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1179 &to_dealloc_id, &to_dealloc_jmeths);
1180 }
1182 // The lock has been dropped so we can free resources.
1183 // Free up either the old cache or the new cache if we allocated one.
1184 if (to_dealloc_jmeths != NULL) {
1185 FreeHeap(to_dealloc_jmeths);
1186 }
1187 // free up the new ID since it wasn't needed
1188 if (to_dealloc_id != NULL) {
1189 JNIHandles::destroy_jmethod_id(to_dealloc_id);
1190 }
1191 }
1192 return id;
1193 }
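// Shape of the jmethodID cache manipulated above: element [0] holds the cache
// length and the id for method_idnum n lives at element [n+1]. A minimal
// read-side sketch (the same logic as jmethod_id_or_null below):
//
//   jmethodID* cache = methods_jmethod_ids_acquire();
//   jmethodID id = NULL;
//   if (cache != NULL && (size_t)cache[0] > idnum) {
//     id = cache[idnum + 1];   // may still be NULL if never created
//   }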
1196 // Common code to fetch the jmethodID from the cache or update the
1197 // cache with the new jmethodID. This function should never do anything
1198 // that causes the caller to go to a safepoint or we can deadlock with
1199 // the VMThread or have cache consistency issues.
1200 //
1201 jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
1202 instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
1203 jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
1204 jmethodID** to_dealloc_jmeths_p) {
1205 assert(new_id != NULL, "sanity check");
1206 assert(to_dealloc_id_p != NULL, "sanity check");
1207 assert(to_dealloc_jmeths_p != NULL, "sanity check");
1208 assert(Threads::number_of_threads() == 0 ||
1209 SafepointSynchronize::is_at_safepoint() ||
1210 JmethodIdCreation_lock->owned_by_self(), "sanity check");
1212 // reacquire the cache - we are locked, single threaded or at a safepoint
1213 jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1214 jmethodID id = NULL;
1215 size_t length = 0;
1217 if (jmeths == NULL || // no cache yet
1218 (length = (size_t)jmeths[0]) <= idnum) { // cache is too short
1219 if (jmeths != NULL) {
1220 // copy any existing entries from the old cache
1221 for (size_t index = 0; index < length; index++) {
1222 new_jmeths[index+1] = jmeths[index+1];
1223 }
1224 *to_dealloc_jmeths_p = jmeths; // save old cache for later delete
1225 }
1226 ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
1227 } else {
1228 // fetch jmethodID (if any) from the existing cache
1229 id = jmeths[idnum+1];
1230 *to_dealloc_jmeths_p = new_jmeths; // save new cache for later delete
1231 }
1232 if (id == NULL) {
1233 // No matching jmethodID in the existing cache or we have a new
1234 // cache or we just grew the cache. This cache write is done here
1235 // by the first thread to win the foot race because a jmethodID
1236 // needs to be unique once it is generally available.
1237 id = new_id;
1239 // The jmethodID cache can be read while unlocked so we have to
1240 // make sure the new jmethodID is complete before installing it
1241 // in the cache.
1242 OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
1243 } else {
1244 *to_dealloc_id_p = new_id; // save new id for later delete
1245 }
1246 return id;
1247 }
1250 // Common code to get the jmethodID cache length and the jmethodID
1251 // value at index idnum if there is one.
1252 //
1253 void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
1254 size_t idnum, size_t *length_p, jmethodID* id_p) {
1255 assert(cache != NULL, "sanity check");
1256 assert(length_p != NULL, "sanity check");
1257 assert(id_p != NULL, "sanity check");
1259 // cache size is stored in element[0], other elements offset by one
1260 *length_p = (size_t)cache[0];
1261 if (*length_p <= idnum) { // cache is too short
1262 *id_p = NULL;
1263 } else {
1264 *id_p = cache[idnum+1]; // fetch jmethodID (if any)
1265 }
1266 }
1269 // Lookup a jmethodID, NULL if not found. Do no blocking, no allocations, no handles
1270 jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
1271 size_t idnum = (size_t)method->method_idnum();
1272 jmethodID* jmeths = methods_jmethod_ids_acquire();
1273 size_t length; // length assigned as debugging crumb
1274 jmethodID id = NULL;
1275 if (jmeths != NULL && // If there is a cache
1276 (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough,
1277 id = jmeths[idnum+1]; // Look up the id (may be NULL)
1278 }
1279 return id;
1280 }
1283 // Cache an itable index
1284 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
1285 int* indices = methods_cached_itable_indices_acquire();
1286 int* to_dealloc_indices = NULL;
1288 // We use a double-check locking idiom here because this cache is
1289 // performance sensitive. In the normal system, this cache only
1290 // transitions from NULL to non-NULL which is safe because we use
1291 // release_set_methods_cached_itable_indices() to advertise the
1292 // new cache. A partially constructed cache should never be seen
1293 // by a racing thread. Cache reads and writes proceed without a
1294 // lock, but creation of the cache itself requires no leaks so a
1295 // lock is generally acquired in that case.
1296 //
1297 // If the RedefineClasses() API has been used, then this cache can
1298 // grow and we'll have transitions from non-NULL to bigger non-NULL.
1299 // Cache creation requires no leaks and we require safety between all
1300 // cache accesses and freeing of the old cache so a lock is generally
1301 // acquired when the RedefineClasses() API has been used.
1303 if (indices == NULL || idnum_can_increment()) {
1304 // we need a cache or the cache can grow
1305 MutexLocker ml(JNICachedItableIndex_lock);
1306 // reacquire the cache to see if another thread already did the work
1307 indices = methods_cached_itable_indices_acquire();
1308 size_t length = 0;
1309 // cache size is stored in element[0], other elements offset by one
1310 if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
1311 size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
1312 int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
1313 new_indices[0] = (int)size;
1314 // copy any existing entries
1315 size_t i;
1316 for (i = 0; i < length; i++) {
1317 new_indices[i+1] = indices[i+1];
1318 }
1319 // Set all the rest to -1
1320 for (i = length; i < size; i++) {
1321 new_indices[i+1] = -1;
1322 }
1323 if (indices != NULL) {
1324 // We have an old cache to delete so save it for after we
1325 // drop the lock.
1326 to_dealloc_indices = indices;
1327 }
1328 release_set_methods_cached_itable_indices(indices = new_indices);
1329 }
1331 if (idnum_can_increment()) {
1332 // this cache can grow so we have to write to it safely
1333 indices[idnum+1] = index;
1334 }
1335 } else {
1336 CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
1337 }
1339 if (!idnum_can_increment()) {
1340 // The cache cannot grow and this JNI itable index value does not
1341 // have to be unique like a jmethodID. If there is a race to set it,
1342 // it doesn't matter.
1343 indices[idnum+1] = index;
1344 }
1346 if (to_dealloc_indices != NULL) {
1347 // we allocated a new cache so free the old one
1348 FreeHeap(to_dealloc_indices);
1349 }
1350 }
1353 // Retrieve a cached itable index
1354 int instanceKlass::cached_itable_index(size_t idnum) {
1355 int* indices = methods_cached_itable_indices_acquire();
1356 if (indices != NULL && ((size_t)indices[0]) > idnum) {
1357 // indices exist and are long enough, retrieve the possibly cached index
1358 return indices[idnum+1];
1359 }
1360 return -1;
1361 }
1364 //
1365 // nmethodBucket is used to record dependent nmethods for
1366 // deoptimization. nmethod dependencies are actually <klass, method>
1367 // pairs but we really only care about the klass part for purposes of
1368 // finding nmethods which might need to be deoptimized. Instead of
1369 // recording the method, a count of how many times a particular nmethod
1370 // was recorded is kept. This ensures that any recording errors are
1371 // noticed since an nmethod should be removed as many times as it is
1372 // added.
1373 //
1374 class nmethodBucket {
1375 private:
1376 nmethod* _nmethod;
1377 int _count;
1378 nmethodBucket* _next;
1380 public:
1381 nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
1382 _nmethod = nmethod;
1383 _next = next;
1384 _count = 1;
1385 }
1386 int count() { return _count; }
1387 int increment() { _count += 1; return _count; }
1388 int decrement() { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
1389 nmethodBucket* next() { return _next; }
1390 void set_next(nmethodBucket* b) { _next = b; }
1391 nmethod* get_nmethod() { return _nmethod; }
1392 };
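// Intended use of nmethodBucket (see add_dependent_nmethod and
// remove_dependent_nmethod below): adding a dependency either bumps the count
// of an existing bucket for that nmethod or prepends a new bucket with count 1;
// removing a dependency decrements the count and unlinks and deletes the
// bucket once the count reaches zero, so adds and removes must balance exactly.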
1395 //
1396 // Walk the list of dependent nmethods searching for nmethods which
1397 // are dependent on the klassOop that was passed in and mark them for
1398 // deoptimization. Returns the number of nmethods found.
1399 //
1400 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
1401 assert_locked_or_safepoint(CodeCache_lock);
1402 int found = 0;
1403 nmethodBucket* b = _dependencies;
1404 while (b != NULL) {
1405 nmethod* nm = b->get_nmethod();
1406 // since dependencies aren't removed until an nmethod becomes a zombie,
1407 // the dependency list may contain nmethods which aren't alive.
1408 if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
1409 if (TraceDependencies) {
1410 ResourceMark rm;
1411 tty->print_cr("Marked for deoptimization");
1412 tty->print_cr(" context = %s", this->external_name());
1413 changes.print();
1414 nm->print();
1415 nm->print_dependencies();
1416 }
1417 nm->mark_for_deoptimization();
1418 found++;
1419 }
1420 b = b->next();
1421 }
1422 return found;
1423 }
1426 //
1427 // Add an nmethodBucket to the list of dependencies for this nmethod.
1428 // It's possible that an nmethod has multiple dependencies on this klass
1429 // so a count is kept for each bucket to guarantee that creation and
1430 // deletion of dependencies is consistent.
1431 //
1432 void instanceKlass::add_dependent_nmethod(nmethod* nm) {
1433 assert_locked_or_safepoint(CodeCache_lock);
1434 nmethodBucket* b = _dependencies;
1435 nmethodBucket* last = NULL;
1436 while (b != NULL) {
1437 if (nm == b->get_nmethod()) {
1438 b->increment();
1439 return;
1440 }
1441 b = b->next();
1442 }
1443 _dependencies = new nmethodBucket(nm, _dependencies);
1444 }
1447 //
1448 // Decrement count of the nmethod in the dependency list and remove
1449 // the bucket completely when the count goes to 0. This method must
1450 // find a corresponding bucket otherwise there's a bug in the
1451 // recording of dependencies.
1452 //
1453 void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
1454 assert_locked_or_safepoint(CodeCache_lock);
1455 nmethodBucket* b = _dependencies;
1456 nmethodBucket* last = NULL;
1457 while (b != NULL) {
1458 if (nm == b->get_nmethod()) {
1459 if (b->decrement() == 0) {
1460 if (last == NULL) {
1461 _dependencies = b->next();
1462 } else {
1463 last->set_next(b->next());
1464 }
1465 delete b;
1466 }
1467 return;
1468 }
1469 last = b;
1470 b = b->next();
1471 }
1472 #ifdef ASSERT
1473 tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
1474 nm->print();
1475 #endif // ASSERT
1476 ShouldNotReachHere();
1477 }
1480 #ifndef PRODUCT
1481 void instanceKlass::print_dependent_nmethods(bool verbose) {
1482 nmethodBucket* b = _dependencies;
1483 int idx = 0;
1484 while (b != NULL) {
1485 nmethod* nm = b->get_nmethod();
1486 tty->print("[%d] count=%d { ", idx++, b->count());
1487 if (!verbose) {
1488 nm->print_on(tty, "nmethod");
1489 tty->print_cr(" } ");
1490 } else {
1491 nm->print();
1492 nm->print_dependencies();
1493 tty->print_cr("--- } ");
1494 }
1495 b = b->next();
1496 }
1497 }
1500 bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
1501 nmethodBucket* b = _dependencies;
1502 while (b != NULL) {
1503 if (nm == b->get_nmethod()) {
1504 return true;
1505 }
1506 b = b->next();
1507 }
1508 return false;
1509 }
1510 #endif //PRODUCT
1513 #ifdef ASSERT
1514 template <class T> void assert_is_in(T *p) {
1515 T heap_oop = oopDesc::load_heap_oop(p);
1516 if (!oopDesc::is_null(heap_oop)) {
1517 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1518 assert(Universe::heap()->is_in(o), "should be in heap");
1519 }
1520 }
1521 template <class T> void assert_is_in_closed_subset(T *p) {
1522 T heap_oop = oopDesc::load_heap_oop(p);
1523 if (!oopDesc::is_null(heap_oop)) {
1524 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1525 assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
1526 }
1527 }
1528 template <class T> void assert_is_in_reserved(T *p) {
1529 T heap_oop = oopDesc::load_heap_oop(p);
1530 if (!oopDesc::is_null(heap_oop)) {
1531 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1532 assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
1533 }
1534 }
1535 template <class T> void assert_nothing(T *p) {}
1537 #else
1538 template <class T> void assert_is_in(T *p) {}
1539 template <class T> void assert_is_in_closed_subset(T *p) {}
1540 template <class T> void assert_is_in_reserved(T *p) {}
1541 template <class T> void assert_nothing(T *p) {}
1542 #endif // ASSERT
1544 //
1545 // Macros that iterate over areas of oops which are specialized on type of
1546 // oop pointer either narrow or wide, depending on UseCompressedOops
1547 //
1548 // Parameters are:
1549 // T - type of oop to point to (either oop or narrowOop)
1550 // start_p - starting pointer for region to iterate over
1551 // count - number of oops or narrowOops to iterate over
1552 // do_oop - action to perform on each oop (it's arbitrary C code which
1553 // makes it more efficient to put in a macro rather than making
1554 // it a template function)
1555 // assert_fn - assert function, which is a template function because
1556 // performance doesn't matter when asserts are enabled.
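// Illustrative sketch (not part of this file; 'obj', 'offset' and 'cl' are
// hypothetical names): an invocation such as
//
//   InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,
//     obj->obj_field_addr<narrowOop>(offset), 1,
//     cl->do_oop(p),
//     assert_is_in_closed_subset)
//
// expands to roughly
//
//   {
//     narrowOop* p = (narrowOop*)(obj->obj_field_addr<narrowOop>(offset));
//     narrowOop* const end = p + 1;
//     while (p < end) {
//       assert_is_in_closed_subset(p);
//       cl->do_oop(p);
//       ++p;
//     }
//   }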
1557 #define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
1558 T, start_p, count, do_oop, \
1559 assert_fn) \
1560 { \
1561 T* p = (T*)(start_p); \
1562 T* const end = p + (count); \
1563 while (p < end) { \
1564 (assert_fn)(p); \
1565 do_oop; \
1566 ++p; \
1567 } \
1568 }
1570 #define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
1571 T, start_p, count, do_oop, \
1572 assert_fn) \
1573 { \
1574 T* const start = (T*)(start_p); \
1575 T* p = start + (count); \
1576 while (start < p) { \
1577 --p; \
1578 (assert_fn)(p); \
1579 do_oop; \
1580 } \
1581 }
1583 #define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
1584 T, start_p, count, low, high, \
1585 do_oop, assert_fn) \
1586 { \
1587 T* const l = (T*)(low); \
1588 T* const h = (T*)(high); \
1589 assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
1590 mask_bits((intptr_t)h, sizeof(T)-1) == 0, \
1591 "bounded region must be properly aligned"); \
1592 T* p = (T*)(start_p); \
1593 T* end = p + (count); \
1594 if (p < l) p = l; \
1595 if (end > h) end = h; \
1596 while (p < end) { \
1597 (assert_fn)(p); \
1598 do_oop; \
1599 ++p; \
1600 } \
1601 }
1604 // The following macros call specialized macros, passing either oop or
1605 // narrowOop as the specialization type. These test the UseCompressedOops
1606 // flag.
1607 #define InstanceKlass_OOP_ITERATE(start_p, count, \
1608 do_oop, assert_fn) \
1609 { \
1610 if (UseCompressedOops) { \
1611 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
1612 start_p, count, \
1613 do_oop, assert_fn) \
1614 } else { \
1615 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
1616 start_p, count, \
1617 do_oop, assert_fn) \
1618 } \
1619 }
1621 #define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
1622 do_oop, assert_fn) \
1623 { \
1624 if (UseCompressedOops) { \
1625 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
1626 start_p, count, \
1627 low, high, \
1628 do_oop, assert_fn) \
1629 } else { \
1630 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
1631 start_p, count, \
1632 low, high, \
1633 do_oop, assert_fn) \
1634 } \
1635 }
1637 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \
1638 { \
1639 /* Compute oopmap block range. The common case \
1640 is nonstatic_oop_map_size == 1. */ \
1641 OopMapBlock* map = start_of_nonstatic_oop_maps(); \
1642 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
1643 if (UseCompressedOops) { \
1644 while (map < end_map) { \
1645 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
1646 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1647 do_oop, assert_fn) \
1648 ++map; \
1649 } \
1650 } else { \
1651 while (map < end_map) { \
1652 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
1653 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1654 do_oop, assert_fn) \
1655 ++map; \
1656 } \
1657 } \
1658 }
1660 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \
1661 { \
1662 OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \
1663 OopMapBlock* map = start_map + nonstatic_oop_map_count(); \
1664 if (UseCompressedOops) { \
1665 while (start_map < map) { \
1666 --map; \
1667 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \
1668 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1669 do_oop, assert_fn) \
1670 } \
1671 } else { \
1672 while (start_map < map) { \
1673 --map; \
1674 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \
1675 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1676 do_oop, assert_fn) \
1677 } \
1678 } \
1679 }
1681 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \
1682 assert_fn) \
1683 { \
1684 /* Compute oopmap block range. The common case is \
1685 nonstatic_oop_map_size == 1, so we accept the \
1686 usually non-existent extra overhead of examining \
1687 all the maps. */ \
1688 OopMapBlock* map = start_of_nonstatic_oop_maps(); \
1689 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
1690 if (UseCompressedOops) { \
1691 while (map < end_map) { \
1692 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
1693 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1694 low, high, \
1695 do_oop, assert_fn) \
1696 ++map; \
1697 } \
1698 } else { \
1699 while (map < end_map) { \
1700 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
1701 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1702 low, high, \
1703 do_oop, assert_fn) \
1704 ++map; \
1705 } \
1706 } \
1707 }
1709 void instanceKlass::follow_static_fields() {
1710 InstanceKlass_OOP_ITERATE( \
1711 start_of_static_fields(), static_oop_field_size(), \
1712 MarkSweep::mark_and_push(p), \
1713 assert_is_in_closed_subset)
1714 }
1716 #ifndef SERIALGC
1717 void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
1718 InstanceKlass_OOP_ITERATE( \
1719 start_of_static_fields(), static_oop_field_size(), \
1720 PSParallelCompact::mark_and_push(cm, p), \
1721 assert_is_in)
1722 }
1723 #endif // SERIALGC
1725 void instanceKlass::adjust_static_fields() {
1726 InstanceKlass_OOP_ITERATE( \
1727 start_of_static_fields(), static_oop_field_size(), \
1728 MarkSweep::adjust_pointer(p), \
1729 assert_nothing)
1730 }
1732 #ifndef SERIALGC
1733 void instanceKlass::update_static_fields() {
1734 InstanceKlass_OOP_ITERATE( \
1735 start_of_static_fields(), static_oop_field_size(), \
1736 PSParallelCompact::adjust_pointer(p), \
1737 assert_nothing)
1738 }
1739 #endif // SERIALGC
1741 void instanceKlass::oop_follow_contents(oop obj) {
1742 assert(obj != NULL, "can't follow the content of NULL object");
1743 obj->follow_header();
1744 InstanceKlass_OOP_MAP_ITERATE( \
1745 obj, \
1746 MarkSweep::mark_and_push(p), \
1747 assert_is_in_closed_subset)
1748 }
1750 #ifndef SERIALGC
1751 void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
1752 oop obj) {
1753 assert(obj != NULL, "can't follow the content of NULL object");
1754 obj->follow_header(cm);
1755 InstanceKlass_OOP_MAP_ITERATE( \
1756 obj, \
1757 PSParallelCompact::mark_and_push(cm, p), \
1758 assert_is_in)
1759 }
1760 #endif // SERIALGC
1762 // The closure's do_header() method dictates whether the given closure should be
1763 // applied to the klass ptr in the object header.
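// Illustrative sketch (hypothetical closure, for explanation only; assumes the
// default OopClosure::do_header() answer of true, as used elsewhere in this file):
//
//   class CountOopsClosure : public OopClosure {
//    public:
//     int _count;
//     CountOopsClosure() : _count(0) {}
//     virtual void do_oop(oop* p)       { _count++; }
//     virtual void do_oop(narrowOop* p) { _count++; }
//     // overriding do_header() to return false would skip the
//     // obj->oop_iterate_header(closure) call in the macro below
//   };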
1765 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
1766 \
1767 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
1768 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1769 /* header */ \
1770 if (closure->do_header()) { \
1771 obj->oop_iterate_header(closure); \
1772 } \
1773 InstanceKlass_OOP_MAP_ITERATE( \
1774 obj, \
1775 SpecializationStats:: \
1776 record_do_oop_call##nv_suffix(SpecializationStats::ik); \
1777 (closure)->do_oop##nv_suffix(p), \
1778 assert_is_in_closed_subset) \
1779 return size_helper(); \
1780 }
1782 #ifndef SERIALGC
1783 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
1784 \
1785 int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \
1786 OopClosureType* closure) { \
1787 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
1788 /* header */ \
1789 if (closure->do_header()) { \
1790 obj->oop_iterate_header(closure); \
1791 } \
1792 /* instance variables */ \
1793 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1794 obj, \
1795 SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\
1796 (closure)->do_oop##nv_suffix(p), \
1797 assert_is_in_closed_subset) \
1798 return size_helper(); \
1799 }
1800 #endif // !SERIALGC
1802 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
1803 \
1804 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
1805 OopClosureType* closure, \
1806 MemRegion mr) { \
1807 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1808 if (closure->do_header()) { \
1809 obj->oop_iterate_header(closure, mr); \
1810 } \
1811 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
1812 obj, mr.start(), mr.end(), \
1813 (closure)->do_oop##nv_suffix(p), \
1814 assert_is_in_closed_subset) \
1815 return size_helper(); \
1816 }
1818 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1819 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1820 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1821 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1822 #ifndef SERIALGC
1823 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1824 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1825 #endif // !SERIALGC
1827 void instanceKlass::iterate_static_fields(OopClosure* closure) {
1828 InstanceKlass_OOP_ITERATE( \
1829 start_of_static_fields(), static_oop_field_size(), \
1830 closure->do_oop(p), \
1831 assert_is_in_reserved)
1832 }
1834 void instanceKlass::iterate_static_fields(OopClosure* closure,
1835 MemRegion mr) {
1836 InstanceKlass_BOUNDED_OOP_ITERATE( \
1837 start_of_static_fields(), static_oop_field_size(), \
1838 mr.start(), mr.end(), \
1839 (closure)->do_oop_v(p), \
1840 assert_is_in_closed_subset)
1841 }
1843 int instanceKlass::oop_adjust_pointers(oop obj) {
1844 int size = size_helper();
1845 InstanceKlass_OOP_MAP_ITERATE( \
1846 obj, \
1847 MarkSweep::adjust_pointer(p), \
1848 assert_is_in)
1849 obj->adjust_header();
1850 return size;
1851 }
1853 #ifndef SERIALGC
1854 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
1855 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1856 obj, \
1857 if (PSScavenge::should_scavenge(p)) { \
1858 pm->claim_or_forward_depth(p); \
1859 }, \
1860 assert_nothing )
1861 }
1863 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
1864 InstanceKlass_OOP_MAP_ITERATE( \
1865 obj, \
1866 PSParallelCompact::adjust_pointer(p), \
1867 assert_nothing)
1868 return size_helper();
1869 }
1871 void instanceKlass::push_static_fields(PSPromotionManager* pm) {
1872 InstanceKlass_OOP_ITERATE( \
1873 start_of_static_fields(), static_oop_field_size(), \
1874 if (PSScavenge::should_scavenge(p)) { \
1875 pm->claim_or_forward_depth(p); \
1876 }, \
1877 assert_nothing )
1878 }
1880 void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
1881 InstanceKlass_OOP_ITERATE( \
1882 start_of_static_fields(), static_oop_field_size(), \
1883 PSParallelCompact::adjust_pointer(p), \
1884 assert_is_in)
1885 }
1886 #endif // SERIALGC
1888 // This klass is alive but the implementor link is not followed/updated.
1889 // Subklass and sibling links are handled by Klass::follow_weak_klass_links
1891 void instanceKlass::follow_weak_klass_links(
1892 BoolObjectClosure* is_alive, OopClosure* keep_alive) {
1893 assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
1894 if (ClassUnloading) {
1895 for (int i = 0; i < implementors_limit; i++) {
1896 klassOop impl = _implementors[i];
1897 if (impl == NULL) break; // no more in the list
1898 if (!is_alive->do_object_b(impl)) {
1899 // remove this entry from the list by overwriting it with the tail
1900 int lasti = --_nof_implementors;
1901 assert(lasti >= i && lasti < implementors_limit, "just checking");
1902 _implementors[i] = _implementors[lasti];
1903 _implementors[lasti] = NULL;
1904 --i; // rerun the loop at this index
1905 }
1906 }
1907 } else {
1908 for (int i = 0; i < implementors_limit; i++) {
1909 keep_alive->do_oop(&adr_implementors()[i]);
1910 }
1911 }
1912 Klass::follow_weak_klass_links(is_alive, keep_alive);
1913 }
1915 void instanceKlass::remove_unshareable_info() {
1916 Klass::remove_unshareable_info();
1917 init_implementor();
1918 }
1920 static void clear_all_breakpoints(methodOop m) {
1921 m->clear_all_breakpoints();
1922 }
1924 void instanceKlass::release_C_heap_structures() {
1925 // Deallocate oop map cache
1926 if (_oop_map_cache != NULL) {
1927 delete _oop_map_cache;
1928 _oop_map_cache = NULL;
1929 }
1931 // Deallocate JNI identifiers for jfieldIDs
1932 JNIid::deallocate(jni_ids());
1933 set_jni_ids(NULL);
1935 jmethodID* jmeths = methods_jmethod_ids_acquire();
1936 if (jmeths != (jmethodID*)NULL) {
1937 release_set_methods_jmethod_ids(NULL);
1938 FreeHeap(jmeths);
1939 }
1941 int* indices = methods_cached_itable_indices_acquire();
1942 if (indices != (int*)NULL) {
1943 release_set_methods_cached_itable_indices(NULL);
1944 FreeHeap(indices);
1945 }
1947 // release dependencies
1948 nmethodBucket* b = _dependencies;
1949 _dependencies = NULL;
1950 while (b != NULL) {
1951 nmethodBucket* next = b->next();
1952 delete b;
1953 b = next;
1954 }
1956 // Deallocate breakpoint records
1957 if (breakpoints() != 0x0) {
1958 methods_do(clear_all_breakpoints);
1959 assert(breakpoints() == 0x0, "should have cleared breakpoints");
1960 }
1962 // deallocate information about previous versions
1963 if (_previous_versions != NULL) {
1964 for (int i = _previous_versions->length() - 1; i >= 0; i--) {
1965 PreviousVersionNode * pv_node = _previous_versions->at(i);
1966 delete pv_node;
1967 }
1968 delete _previous_versions;
1969 _previous_versions = NULL;
1970 }
1972 // deallocate the cached class file
1973 if (_cached_class_file_bytes != NULL) {
1974 os::free(_cached_class_file_bytes);
1975 _cached_class_file_bytes = NULL;
1976 _cached_class_file_len = 0;
1977 }
1979 // Decrement symbol reference counts associated with the unloaded class.
1980 if (_name != NULL) _name->decrement_refcount();
1981 // unreference array name derived from this class name (arrays of an unloaded
1982 // class can't be referenced anymore).
1983 if (_array_name != NULL) _array_name->decrement_refcount();
1984 if (_source_file_name != NULL) _source_file_name->decrement_refcount();
1985 if (_source_debug_extension != NULL) _source_debug_extension->decrement_refcount();
1986 // walk constant pool and decrement symbol reference counts
1987 _constants->unreference_symbols();
1988 }
1990 void instanceKlass::set_source_file_name(Symbol* n) {
1991 _source_file_name = n;
1992 if (_source_file_name != NULL) _source_file_name->increment_refcount();
1993 }
1995 void instanceKlass::set_source_debug_extension(Symbol* n) {
1996 _source_debug_extension = n;
1997 if (_source_debug_extension != NULL) _source_debug_extension->increment_refcount();
1998 }
2000 const char* instanceKlass::signature_name() const {
2001 const char* src = (const char*) (name()->as_C_string());
2002 const int src_length = (int)strlen(src);
2003 char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
2004 int src_index = 0;
2005 int dest_index = 0;
2006 dest[dest_index++] = 'L';
2007 while (src_index < src_length) {
2008 dest[dest_index++] = src[src_index++];
2009 }
2010 dest[dest_index++] = ';';
2011 dest[dest_index] = '\0';
2012 return dest;
2013 }
2015 // different versions of is_same_class_package
2016 bool instanceKlass::is_same_class_package(klassOop class2) {
2017 klassOop class1 = as_klassOop();
2018 oop classloader1 = instanceKlass::cast(class1)->class_loader();
2019 Symbol* classname1 = Klass::cast(class1)->name();
2021 if (Klass::cast(class2)->oop_is_objArray()) {
2022 class2 = objArrayKlass::cast(class2)->bottom_klass();
2023 }
2024 oop classloader2;
2025 if (Klass::cast(class2)->oop_is_instance()) {
2026 classloader2 = instanceKlass::cast(class2)->class_loader();
2027 } else {
2028 assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array");
2029 classloader2 = NULL;
2030 }
2031 Symbol* classname2 = Klass::cast(class2)->name();
2033 return instanceKlass::is_same_class_package(classloader1, classname1,
2034 classloader2, classname2);
2035 }
2037 bool instanceKlass::is_same_class_package(oop classloader2, Symbol* classname2) {
2038 klassOop class1 = as_klassOop();
2039 oop classloader1 = instanceKlass::cast(class1)->class_loader();
2040 Symbol* classname1 = Klass::cast(class1)->name();
2042 return instanceKlass::is_same_class_package(classloader1, classname1,
2043 classloader2, classname2);
2044 }
2046 // Return true if two classes are in the same package; classloader
2047 // and classname information is enough to determine a class's package.
2048 bool instanceKlass::is_same_class_package(oop class_loader1, Symbol* class_name1,
2049 oop class_loader2, Symbol* class_name2) {
2050 if (class_loader1 != class_loader2) {
2051 return false;
2052 } else if (class_name1 == class_name2) {
2053 return true; // skip painful bytewise comparison
2054 } else {
2055 ResourceMark rm;
2057 // The Symbol*'s are in UTF8 encoding. Since we only need to check explicitly
2058 // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
2059 // Otherwise, we just compare jbyte values between the strings.
2060 const jbyte *name1 = class_name1->base();
2061 const jbyte *name2 = class_name2->base();
2063 const jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
2064 const jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');
2066 if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
2067 // One of the two doesn't have a package. Only return true
2068 // if the other one also doesn't have a package.
2069 return last_slash1 == last_slash2;
2070 } else {
2071 // Skip over '['s
2072 if (*name1 == '[') {
2073 do {
2074 name1++;
2075 } while (*name1 == '[');
2076 if (*name1 != 'L') {
2077 // Something is terribly wrong. Shouldn't be here.
2078 return false;
2079 }
2080 }
2081 if (*name2 == '[') {
2082 do {
2083 name2++;
2084 } while (*name2 == '[');
2085 if (*name2 != 'L') {
2086 // Something is terribly wrong. Shouldn't be here.
2087 return false;
2088 }
2089 }
2091 // Check that package part is identical
2092 int length1 = last_slash1 - name1;
2093 int length2 = last_slash2 - name2;
2095 return UTF8::equal(name1, length1, name2, length2);
2096 }
2097 }
2098 }
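// Illustrative examples for the comparison above (hypothetical names/loaders):
// with the same class loader,
//   "java/lang/String"  vs "java/lang/Object"  -> true  (both in "java/lang")
//   "java/lang/String"  vs "java/util/HashMap" -> false (different packages)
//   "Foo"               vs "Bar"               -> true  (both in the unnamed package)
// For array descriptors the leading '['s are skipped and the remainder must then
// start with 'L' before the package prefixes are compared. Classes from different
// class loaders are never in the same runtime package.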
2100 // Returns true iff super_method can be overridden by a method in targetclassname
2101 // See JLS 3rd edition 8.4.6.1
2102 // Assumes name-signature match
2103 // "this" is instanceKlass of super_method which must exist
2104 // note that the instanceKlass of the method in the targetclassname has not always been created yet
2105 bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS) {
2106 // Private methods cannot be overridden
2107 if (super_method->is_private()) {
2108 return false;
2109 }
2110 // If super method is accessible, then override
2111 if ((super_method->is_protected()) ||
2112 (super_method->is_public())) {
2113 return true;
2114 }
2115 // Package-private methods are not inherited outside of package
2116 assert(super_method->is_package_private(), "must be package private");
2117 return(is_same_class_package(targetclassloader(), targetclassname));
2118 }
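// Illustrative example (hypothetical classes, for explanation only): given
//   package p1;  class A { void m() {} }             // package-private m
//   package p2;  class B extends p1.A { void m() {} }
// is_override() on A.m with target class p2.B answers false, because a
// package-private method is only overridable inside its own runtime package;
// a protected or public m would answer true, and a private m always answers false.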
2120 /* defined for now in jvm.cpp, for historical reasons *--
2121 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
2122 Symbol*& simple_name_result, TRAPS) {
2123 ...
2124 }
2125 */
2127 // tell if two classes have the same enclosing class (at package level)
2128 bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
2129 klassOop class2_oop, TRAPS) {
2130 if (class2_oop == class1->as_klassOop()) return true;
2131 if (!Klass::cast(class2_oop)->oop_is_instance()) return false;
2132 instanceKlassHandle class2(THREAD, class2_oop);
2134 // must be in same package before we try anything else
2135 if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
2136 return false;
2138 // As long as there is an outer1.getEnclosingClass,
2139 // shift the search outward.
2140 instanceKlassHandle outer1 = class1;
2141 for (;;) {
2142 // As we walk along, look for equalities between outer1 and class2.
2143 // Eventually, the walks will terminate as outer1 stops
2144 // at the top-level class around the original class.
2145 bool ignore_inner_is_member;
2146 klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
2147 CHECK_false);
2148 if (next == NULL) break;
2149 if (next == class2()) return true;
2150 outer1 = instanceKlassHandle(THREAD, next);
2151 }
2153 // Now do the same for class2.
2154 instanceKlassHandle outer2 = class2;
2155 for (;;) {
2156 bool ignore_inner_is_member;
2157 klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
2158 CHECK_false);
2159 if (next == NULL) break;
2160 // Might as well check the new outer against all available values.
2161 if (next == class1()) return true;
2162 if (next == outer1()) return true;
2163 outer2 = instanceKlassHandle(THREAD, next);
2164 }
2166 // If by this point we have not found an equality between the
2167 // two classes, we know they are in separate package members.
2168 return false;
2169 }
2172 jint instanceKlass::compute_modifier_flags(TRAPS) const {
2173 klassOop k = as_klassOop();
2174 jint access = access_flags().as_int();
2176 // But check if it happens to be member class.
2177 typeArrayOop inner_class_list = inner_classes();
2178 int length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
2179 assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
2180 if (length > 0) {
2181 typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
2182 instanceKlassHandle ik(THREAD, k);
2183 for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
2184 int ioff = inner_class_list_h->ushort_at(
2185 i + instanceKlass::inner_class_inner_class_info_offset);
2187 // Inner class attribute can be zero, skip it.
2188 // Strange but true: JVM spec. allows null inner class refs.
2189 if (ioff == 0) continue;
2191 // only look at classes that are already loaded
2192 // since we are looking for the flags for ourselves.
2193 Symbol* inner_name = ik->constants()->klass_name_at(ioff);
2194 if ((ik->name() == inner_name)) {
2195 // This is really a member class.
2196 access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
2197 break;
2198 }
2199 }
2200 }
2201 // Remember to strip ACC_SUPER bit
2202 return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS;
2203 }
2205 jint instanceKlass::jvmti_class_status() const {
2206 jint result = 0;
2208 if (is_linked()) {
2209 result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED;
2210 }
2212 if (is_initialized()) {
2213 assert(is_linked(), "Class status is not consistent");
2214 result |= JVMTI_CLASS_STATUS_INITIALIZED;
2215 }
2216 if (is_in_error_state()) {
2217 result |= JVMTI_CLASS_STATUS_ERROR;
2218 }
2219 return result;
2220 }
2222 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) {
2223 itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
2224 int method_table_offset_in_words = ioe->offset()/wordSize;
2225 int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
2226 / itableOffsetEntry::size();
2228 for (int cnt = 0 ; ; cnt ++, ioe ++) {
2229 // If the interface isn't implemented by the receiver class,
2230 // the VM should throw IncompatibleClassChangeError.
2231 if (cnt >= nof_interfaces) {
2232 THROW_0(vmSymbols::java_lang_IncompatibleClassChangeError());
2233 }
2235 klassOop ik = ioe->interface_klass();
2236 if (ik == holder) break;
2237 }
2239 itableMethodEntry* ime = ioe->first_method_entry(as_klassOop());
2240 methodOop m = ime[index].method();
2241 if (m == NULL) {
2242 THROW_0(vmSymbols::java_lang_AbstractMethodError());
2243 }
2244 return m;
2245 }
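// Illustrative sketch (hypothetical names, for explanation only): resolving an
// invokeinterface selection by hand would look roughly like
//
//   instanceKlass* recv = instanceKlass::cast(receiver_oop->klass());
//   methodOop target = recv->method_at_itable(interface_klass, itable_index, CHECK_NULL);
//
// which throws IncompatibleClassChangeError if the receiver class does not
// implement interface_klass, and AbstractMethodError if the selected slot is empty.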
2247 // On-stack replacement stuff
2248 void instanceKlass::add_osr_nmethod(nmethod* n) {
2249 // only one compilation can be active
2250 NEEDS_CLEANUP
2251 // This is a short non-blocking critical region, so the no safepoint check is ok.
2252 OsrList_lock->lock_without_safepoint_check();
2253 assert(n->is_osr_method(), "wrong kind of nmethod");
2254 n->set_osr_link(osr_nmethods_head());
2255 set_osr_nmethods_head(n);
2256 // Raise the highest osr level if necessary
2257 if (TieredCompilation) {
2258 methodOop m = n->method();
2259 m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
2260 }
2261 // Remember to unlock again
2262 OsrList_lock->unlock();
2264 // Get rid of the osr methods for the same bci that have lower levels.
2265 if (TieredCompilation) {
2266 for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
2267 nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
2268 if (inv != NULL && inv->is_in_use()) {
2269 inv->make_not_entrant();
2270 }
2271 }
2272 }
2273 }
2276 void instanceKlass::remove_osr_nmethod(nmethod* n) {
2277 // This is a short non-blocking critical region, so the no safepoint check is ok.
2278 OsrList_lock->lock_without_safepoint_check();
2279 assert(n->is_osr_method(), "wrong kind of nmethod");
2280 nmethod* last = NULL;
2281 nmethod* cur = osr_nmethods_head();
2282 int max_level = CompLevel_none; // Find the max comp level excluding n
2283 methodOop m = n->method();
2284 // Search for match
2285 while(cur != NULL && cur != n) {
2286 if (TieredCompilation) {
2287 // Find max level before n
2288 max_level = MAX2(max_level, cur->comp_level());
2289 }
2290 last = cur;
2291 cur = cur->osr_link();
2292 }
2293 nmethod* next = NULL;
2294 if (cur == n) {
2295 next = cur->osr_link();
2296 if (last == NULL) {
2297 // Remove first element
2298 set_osr_nmethods_head(next);
2299 } else {
2300 last->set_osr_link(next);
2301 }
2302 }
2303 n->set_osr_link(NULL);
2304 if (TieredCompilation) {
2305 cur = next;
2306 while (cur != NULL) {
2307 // Find max level after n
2308 max_level = MAX2(max_level, cur->comp_level());
2309 cur = cur->osr_link();
2310 }
2311 m->set_highest_osr_comp_level(max_level);
2312 }
2313 // Remember to unlock again
2314 OsrList_lock->unlock();
2315 }
2317 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
2318 // This is a short non-blocking critical region, so the no safepoint check is ok.
2319 OsrList_lock->lock_without_safepoint_check();
2320 nmethod* osr = osr_nmethods_head();
2321 nmethod* best = NULL;
2322 while (osr != NULL) {
2323 assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
2324 // There can be a window when a c1 osr method exists but we are waiting
2325 // for a c2 version. When c2 completes its osr nmethod we will trash
2326 // the c1 version and only be able to find the c2 version. However,
2327 // while we overflow in the c1 code at back branches, we don't want to
2328 // try to switch to the same code we are already running.
2330 if (osr->method() == m &&
2331 (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
2332 if (match_level) {
2333 if (osr->comp_level() == comp_level) {
2334 // Found a match - return it.
2335 OsrList_lock->unlock();
2336 return osr;
2337 }
2338 } else {
2339 if (best == NULL || (osr->comp_level() > best->comp_level())) {
2340 if (osr->comp_level() == CompLevel_highest_tier) {
2341 // Found the best possible - return it.
2342 OsrList_lock->unlock();
2343 return osr;
2344 }
2345 best = osr;
2346 }
2347 }
2348 }
2349 osr = osr->osr_link();
2350 }
2351 OsrList_lock->unlock();
2352 if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
2353 return best;
2354 }
2355 return NULL;
2356 }
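// Illustrative sketch (hypothetical names): a caller that wants the best
// available OSR code for method 'm' at 'bci', at or above 'level', would use
//
//   nmethod* osr = ik->lookup_osr_nmethod(m, bci, level, false /* match_level */);
//   if (osr != NULL) {
//     // transfer execution to osr->osr_entry()
//   }
//
// With match_level == true only an exact comp_level match is returned.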
2358 // -----------------------------------------------------------------------------------------------------
2359 #ifndef PRODUCT
2361 // Printing
2363 #define BULLET " - "
2365 void FieldPrinter::do_field(fieldDescriptor* fd) {
2366 _st->print(BULLET);
2367 if (fd->is_static() || (_obj == NULL)) {
2368 fd->print_on(_st);
2369 _st->cr();
2370 } else {
2371 fd->print_on_for(_st, _obj);
2372 _st->cr();
2373 }
2374 }
2377 void instanceKlass::oop_print_on(oop obj, outputStream* st) {
2378 Klass::oop_print_on(obj, st);
2380 if (as_klassOop() == SystemDictionary::String_klass()) {
2381 typeArrayOop value = java_lang_String::value(obj);
2382 juint offset = java_lang_String::offset(obj);
2383 juint length = java_lang_String::length(obj);
2384 if (value != NULL &&
2385 value->is_typeArray() &&
2386 offset <= (juint) value->length() &&
2387 offset + length <= (juint) value->length()) {
2388 st->print(BULLET"string: ");
2389 Handle h_obj(obj);
2390 java_lang_String::print(h_obj, st);
2391 st->cr();
2392 if (!WizardMode) return; // that is enough
2393 }
2394 }
2396 st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj));
2397 FieldPrinter print_nonstatic_field(st, obj);
2398 do_nonstatic_fields(&print_nonstatic_field);
2400 if (as_klassOop() == SystemDictionary::Class_klass()) {
2401 st->print(BULLET"signature: ");
2402 java_lang_Class::print_signature(obj, st);
2403 st->cr();
2404 klassOop mirrored_klass = java_lang_Class::as_klassOop(obj);
2405 st->print(BULLET"fake entry for mirror: ");
2406 mirrored_klass->print_value_on(st);
2407 st->cr();
2408 st->print(BULLET"fake entry resolved_constructor: ");
2409 methodOop ctor = java_lang_Class::resolved_constructor(obj);
2410 ctor->print_value_on(st);
2411 klassOop array_klass = java_lang_Class::array_klass(obj);
2412 st->cr();
2413 st->print(BULLET"fake entry for array: ");
2414 array_klass->print_value_on(st);
2415 st->cr();
2416 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2417 st->print(BULLET"signature: ");
2418 java_dyn_MethodType::print_signature(obj, st);
2419 st->cr();
2420 }
2421 }
2423 #endif //PRODUCT
2425 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
2426 st->print("a ");
2427 name()->print_value_on(st);
2428 obj->print_address_on(st);
2429 if (as_klassOop() == SystemDictionary::String_klass()
2430 && java_lang_String::value(obj) != NULL) {
2431 ResourceMark rm;
2432 int len = java_lang_String::length(obj);
2433 int plen = (len < 24 ? len : 12);
2434 char* str = java_lang_String::as_utf8_string(obj, 0, plen);
2435 st->print(" = \"%s\"", str);
2436 if (len > plen)
2437 st->print("...[%d]", len);
2438 } else if (as_klassOop() == SystemDictionary::Class_klass()) {
2439 klassOop k = java_lang_Class::as_klassOop(obj);
2440 st->print(" = ");
2441 if (k != NULL) {
2442 k->print_value_on(st);
2443 } else {
2444 const char* tname = type2name(java_lang_Class::primitive_type(obj));
2445 st->print("%s", tname ? tname : "type?");
2446 }
2447 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2448 st->print(" = ");
2449 java_dyn_MethodType::print_signature(obj, st);
2450 } else if (java_lang_boxing_object::is_instance(obj)) {
2451 st->print(" = ");
2452 java_lang_boxing_object::print(obj, st);
2453 }
2454 }
2456 const char* instanceKlass::internal_name() const {
2457 return external_name();
2458 }
2460 // Verification
2462 class VerifyFieldClosure: public OopClosure {
2463 protected:
2464 template <class T> void do_oop_work(T* p) {
2465 guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
2466 oop obj = oopDesc::load_decode_heap_oop(p);
2467 if (!obj->is_oop_or_null()) {
2468 tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
2469 Universe::print();
2470 guarantee(false, "boom");
2471 }
2472 }
2473 public:
2474 virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); }
2475 virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
2476 };
2478 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
2479 Klass::oop_verify_on(obj, st);
2480 VerifyFieldClosure blk;
2481 oop_oop_iterate(obj, &blk);
2482 }
2484 #ifndef PRODUCT
2486 void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
2487 // This verification code is disabled. JDK_Version::is_gte_jdk14x_version()
2488 // cannot be called since this function is called before the VM is
2489 // able to determine which JDK version it is running with.
2490 // The check below is always false since 1.4.
2491 return;
2493 // This verification code was temporarily disabled for the 1.4
2494 // reflection implementation since java.lang.Class now has
2495 // Java-level instance fields. Should rewrite this to handle this
2496 // case.
2497 if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
2498 // Verify that java.lang.Class instances have a fake oop field added.
2499 instanceKlass* ik = instanceKlass::cast(k);
2501 // Check that we have the right class
2502 static bool first_time = true;
2503 guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
2504 first_time = false;
2505 const int extra = java_lang_Class::number_of_fake_oop_fields;
2506 guarantee(ik->nonstatic_field_size() == extra, "just checking");
2507 guarantee(ik->nonstatic_oop_map_count() == 1, "just checking");
2508 guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");
2510 // Check that the map is (2,extra)
2511 int offset = java_lang_Class::klass_offset;
2513 OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
2514 guarantee(map->offset() == offset && map->count() == (unsigned int) extra,
2515 "sanity");
2516 }
2517 }
2519 #endif // ndef PRODUCT
2521 // JNIid class for jfieldIDs only
2522 // Note to reviewers:
2523 // These JNI functions are just moved over to column 1 and not changed
2524 // in the compressed oops workspace.
2525 JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
2526 _holder = holder;
2527 _offset = offset;
2528 _next = next;
2529 debug_only(_is_static_field_id = false;)
2530 }
2533 JNIid* JNIid::find(int offset) {
2534 JNIid* current = this;
2535 while (current != NULL) {
2536 if (current->offset() == offset) return current;
2537 current = current->next();
2538 }
2539 return NULL;
2540 }
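// Illustrative sketch (hypothetical names): the usual lookup-or-create pattern
// for handing out a jfieldID is roughly
//
//   JNIid* probe = (ik->jni_ids() == NULL) ? NULL : ik->jni_ids()->find(field_offset);
//   if (probe == NULL) {
//     // prepend a new node; the chain head lives on the instanceKlass
//     probe = new JNIid(ik->as_klassOop(), field_offset, ik->jni_ids());
//     ik->set_jni_ids(probe);
//   }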
2542 void JNIid::oops_do(OopClosure* f) {
2543 for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
2544 f->do_oop(cur->holder_addr());
2545 }
2546 }
2548 void JNIid::deallocate(JNIid* current) {
2549 while (current != NULL) {
2550 JNIid* next = current->next();
2551 delete current;
2552 current = next;
2553 }
2554 }
2557 void JNIid::verify(klassOop holder) {
2558 int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields();
2559 int end_field_offset;
2560 end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
2562 JNIid* current = this;
2563 while (current != NULL) {
2564 guarantee(current->holder() == holder, "Invalid klass in JNIid");
2565 #ifdef ASSERT
2566 int o = current->offset();
2567 if (current->is_static_field_id()) {
2568 guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
2569 }
2570 #endif
2571 current = current->next();
2572 }
2573 }
2576 #ifdef ASSERT
2577 void instanceKlass::set_init_state(ClassState state) {
2578 bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
2579 : (_init_state < state);
2580 assert(good_state || state == allocated, "illegal state transition");
2581 _init_state = state;
2582 }
2583 #endif
2586 // RedefineClasses() support for previous versions:
2588 // Add an information node that contains weak references to the
2589 // interesting parts of the previous version of the_class.
2590 // This is also where we clean out any unused weak references.
2591 // Note that while we delete nodes from the _previous_versions
2592 // array, we never delete the array itself until the klass is
2593 // unloaded. The has_been_redefined() query depends on that fact.
2594 //
2595 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
2596 BitMap* emcp_methods, int emcp_method_count) {
2597 assert(Thread::current()->is_VM_thread(),
2598 "only VMThread can add previous versions");
2600 if (_previous_versions == NULL) {
2601 // This is the first previous version so make some space.
2602 // Start with 2 elements under the assumption that the class
2603 // won't be redefined much.
2604 _previous_versions = new (ResourceObj::C_HEAP)
2605 GrowableArray<PreviousVersionNode *>(2, true);
2606 }
2608 // RC_TRACE macro has an embedded ResourceMark
2609 RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
2610 ikh->external_name(), _previous_versions->length(), emcp_method_count));
2611 constantPoolHandle cp_h(ikh->constants());
2612 jobject cp_ref;
2613 if (cp_h->is_shared()) {
2614 // a shared ConstantPool requires a regular reference; a weak
2615 // reference would be collectible
2616 cp_ref = JNIHandles::make_global(cp_h);
2617 } else {
2618 cp_ref = JNIHandles::make_weak_global(cp_h);
2619 }
2620 PreviousVersionNode * pv_node = NULL;
2621 objArrayOop old_methods = ikh->methods();
2623 if (emcp_method_count == 0) {
2624 // non-shared ConstantPool gets a weak reference
2625 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL);
2626 RC_TRACE(0x00000400,
2627 ("add: all methods are obsolete; flushing any EMCP weak refs"));
2628 } else {
2629 int local_count = 0;
2630 GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP)
2631 GrowableArray<jweak>(emcp_method_count, true);
2632 for (int i = 0; i < old_methods->length(); i++) {
2633 if (emcp_methods->at(i)) {
2634 // this old method is EMCP so save a weak ref
2635 methodOop old_method = (methodOop) old_methods->obj_at(i);
2636 methodHandle old_method_h(old_method);
2637 jweak method_ref = JNIHandles::make_weak_global(old_method_h);
2638 method_refs->append(method_ref);
2639 if (++local_count >= emcp_method_count) {
2640 // no more EMCP methods so bail out now
2641 break;
2642 }
2643 }
2644 }
2645 // non-shared ConstantPool gets a weak reference
2646 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs);
2647 }
2649 _previous_versions->append(pv_node);
2651 // Using weak references allows the interesting parts of previous
2652 // classes to be GC'ed when they are no longer needed. Since the
2653 // caller is the VMThread and we are at a safepoint, this is a good
2654 // time to clear out unused weak references.
2656 RC_TRACE(0x00000400, ("add: previous version length=%d",
2657 _previous_versions->length()));
2659 // skip the last entry since we just added it
2660 for (int i = _previous_versions->length() - 2; i >= 0; i--) {
2661 // check the previous versions array for GC'ed weak refs
2662 pv_node = _previous_versions->at(i);
2663 cp_ref = pv_node->prev_constant_pool();
2664 assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2665 if (cp_ref == NULL) {
2666 delete pv_node;
2667 _previous_versions->remove_at(i);
2668 // Since we are traversing the array backwards, we don't have to
2669 // do anything special with the index.
2670 continue; // robustness
2671 }
2673 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2674 if (cp == NULL) {
2675 // this entry has been GC'ed so remove it
2676 delete pv_node;
2677 _previous_versions->remove_at(i);
2678 // Since we are traversing the array backwards, we don't have to
2679 // do anything special with the index.
2680 continue;
2681 } else {
2682 RC_TRACE(0x00000400, ("add: previous version @%d is alive", i));
2683 }
2685 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2686 if (method_refs != NULL) {
2687 RC_TRACE(0x00000400, ("add: previous methods length=%d",
2688 method_refs->length()));
2689 for (int j = method_refs->length() - 1; j >= 0; j--) {
2690 jweak method_ref = method_refs->at(j);
2691 assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2692 if (method_ref == NULL) {
2693 method_refs->remove_at(j);
2694 // Since we are traversing the array backwards, we don't have to
2695 // do anything special with the index.
2696 continue; // robustness
2697 }
2699 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2700 if (method == NULL || emcp_method_count == 0) {
2701 // This method entry has been GC'ed or the current
2702 // RedefineClasses() call has made all methods obsolete
2703 // so remove it.
2704 JNIHandles::destroy_weak_global(method_ref);
2705 method_refs->remove_at(j);
2706 } else {
2707 // RC_TRACE macro has an embedded ResourceMark
2708 RC_TRACE(0x00000400,
2709 ("add: %s(%s): previous method @%d in version @%d is alive",
2710 method->name()->as_C_string(), method->signature()->as_C_string(),
2711 j, i));
2712 }
2713 }
2714 }
2715 }
2717 int obsolete_method_count = old_methods->length() - emcp_method_count;
2719 if (emcp_method_count != 0 && obsolete_method_count != 0 &&
2720 _previous_versions->length() > 1) {
2721 // We have a mix of obsolete and EMCP methods. If there are more
2722 // previous versions than the one we just added, then we have to
2723 // clear out any matching EMCP method entries the hard way.
2724 int local_count = 0;
2725 for (int i = 0; i < old_methods->length(); i++) {
2726 if (!emcp_methods->at(i)) {
2727 // only obsolete methods are interesting
2728 methodOop old_method = (methodOop) old_methods->obj_at(i);
2729 Symbol* m_name = old_method->name();
2730 Symbol* m_signature = old_method->signature();
2732 // skip the last entry since we just added it
2733 for (int j = _previous_versions->length() - 2; j >= 0; j--) {
2734 // check the previous versions array for GC'ed weak refs
2735 pv_node = _previous_versions->at(j);
2736 cp_ref = pv_node->prev_constant_pool();
2737 assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2738 if (cp_ref == NULL) {
2739 delete pv_node;
2740 _previous_versions->remove_at(j);
2741 // Since we are traversing the array backwards, we don't have to
2742 // do anything special with the index.
2743 continue; // robustness
2744 }
2746 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2747 if (cp == NULL) {
2748 // this entry has been GC'ed so remove it
2749 delete pv_node;
2750 _previous_versions->remove_at(j);
2751 // Since we are traversing the array backwards, we don't have to
2752 // do anything special with the index.
2753 continue;
2754 }
2756 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2757 if (method_refs == NULL) {
2758 // We have run into a PreviousVersion generation where
2759 // all methods were made obsolete during that generation's
2760 // RedefineClasses() operation. At the time of that
2761 // operation, all EMCP methods were flushed so we don't
2762 // have to go back any further.
2763 //
2764 // A NULL method_refs is different than an empty method_refs.
2765 // We cannot infer any optimizations about older generations
2766 // from an empty method_refs for the current generation.
2767 break;
2768 }
2770 for (int k = method_refs->length() - 1; k >= 0; k--) {
2771 jweak method_ref = method_refs->at(k);
2772 assert(method_ref != NULL,
2773 "weak method ref was unexpectedly cleared");
2774 if (method_ref == NULL) {
2775 method_refs->remove_at(k);
2776 // Since we are traversing the array backwards, we don't
2777 // have to do anything special with the index.
2778 continue; // robustness
2779 }
2781 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2782 if (method == NULL) {
2783 // this method entry has been GC'ed so skip it
2784 JNIHandles::destroy_weak_global(method_ref);
2785 method_refs->remove_at(k);
2786 continue;
2787 }
2789 if (method->name() == m_name &&
2790 method->signature() == m_signature) {
2791 // The current RedefineClasses() call has made all EMCP
2792 // versions of this method obsolete so mark it as obsolete
2793 // and remove the weak ref.
2794 RC_TRACE(0x00000400,
2795 ("add: %s(%s): flush obsolete method @%d in version @%d",
2796 m_name->as_C_string(), m_signature->as_C_string(), k, j));
2798 method->set_is_obsolete();
2799 JNIHandles::destroy_weak_global(method_ref);
2800 method_refs->remove_at(k);
2801 break;
2802 }
2803 }
2805 // The previous loop may not find a matching EMCP method, but
2806 // that doesn't mean that we can optimize and not go any
2807 // further back in the PreviousVersion generations. The EMCP
2808 // method for this generation could have already been GC'ed,
2809 // but there still may be an older EMCP method that has not
2810 // been GC'ed.
2811 }
2813 if (++local_count >= obsolete_method_count) {
2814 // no more obsolete methods so bail out now
2815 break;
2816 }
2817 }
2818 }
2819 }
2820 } // end add_previous_version()
2823 // Determine if instanceKlass has a previous version.
2824 bool instanceKlass::has_previous_version() const {
2825 if (_previous_versions == NULL) {
2826 // no previous versions array so answer is easy
2827 return false;
2828 }
2830 for (int i = _previous_versions->length() - 1; i >= 0; i--) {
2831 // Check the previous versions array for an info node that hasn't
2832 // been GC'ed
2833 PreviousVersionNode * pv_node = _previous_versions->at(i);
2835 jobject cp_ref = pv_node->prev_constant_pool();
2836 assert(cp_ref != NULL, "cp reference was unexpectedly cleared");
2837 if (cp_ref == NULL) {
2838 continue; // robustness
2839 }
2841 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2842 if (cp != NULL) {
2843 // we have at least one previous version
2844 return true;
2845 }
2847 // We don't have to check the method refs. If the constant pool has
2848 // been GC'ed then so have the methods.
2849 }
2851 // all of the underlying nodes' info has been GC'ed
2852 return false;
2853 } // end has_previous_version()
2855 methodOop instanceKlass::method_with_idnum(int idnum) {
2856 methodOop m = NULL;
2857 if (idnum < methods()->length()) {
2858 m = (methodOop) methods()->obj_at(idnum);
2859 }
2860 if (m == NULL || m->method_idnum() != idnum) {
2861 for (int index = 0; index < methods()->length(); ++index) {
2862 m = (methodOop) methods()->obj_at(index);
2863 if (m->method_idnum() == idnum) {
2864 return m;
2865 }
2866 }
2867 }
2868 return m;
2869 }
2872 // Set the annotation at 'idnum' to 'anno'.
2873 // We don't want to create or extend the array if 'anno' is NULL, since that is the
2874 // default value. However, if the array exists and is long enough, we must set NULL values.
2875 void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) {
2876 objArrayOop md = *md_p;
2877 if (md != NULL && md->length() > idnum) {
2878 md->obj_at_put(idnum, anno);
2879 } else if (anno != NULL) {
2880 // create the array
2881 int length = MAX2(idnum+1, (int)_idnum_allocated_count);
2882 md = oopFactory::new_system_objArray(length, Thread::current());
2883 if (*md_p != NULL) {
2884 // copy the existing entries
2885 for (int index = 0; index < (*md_p)->length(); index++) {
2886 md->obj_at_put(index, (*md_p)->obj_at(index));
2887 }
2888 }
2889 set_annotations(md, md_p);
2890 md->obj_at_put(idnum, anno);
2891 } // if there is no array and idnum isn't included, there is nothing to do
2892 }
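// Illustrative example (hypothetical values): with *md_p == NULL and idnum == 3,
// a non-NULL 'anno' allocates an array of MAX2(4, _idnum_allocated_count) slots
// and stores 'anno' at index 3, whereas anno == NULL is a no-op; if an array of
// length >= 4 already exists, even a NULL 'anno' is stored so the default value
// can be observed for that method.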
2894 // Construct a PreviousVersionNode entry for the array hung off
2895 // the instanceKlass.
2896 PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool,
2897 bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) {
2899 _prev_constant_pool = prev_constant_pool;
2900 _prev_cp_is_weak = prev_cp_is_weak;
2901 _prev_EMCP_methods = prev_EMCP_methods;
2902 }
2905 // Destroy a PreviousVersionNode
2906 PreviousVersionNode::~PreviousVersionNode() {
2907 if (_prev_constant_pool != NULL) {
2908 if (_prev_cp_is_weak) {
2909 JNIHandles::destroy_weak_global(_prev_constant_pool);
2910 } else {
2911 JNIHandles::destroy_global(_prev_constant_pool);
2912 }
2913 }
2915 if (_prev_EMCP_methods != NULL) {
2916 for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) {
2917 jweak method_ref = _prev_EMCP_methods->at(i);
2918 if (method_ref != NULL) {
2919 JNIHandles::destroy_weak_global(method_ref);
2920 }
2921 }
2922 delete _prev_EMCP_methods;
2923 }
2924 }
2927 // Construct a PreviousVersionInfo entry
2928 PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
2929 _prev_constant_pool_handle = constantPoolHandle(); // NULL handle
2930 _prev_EMCP_method_handles = NULL;
2932 jobject cp_ref = pv_node->prev_constant_pool();
2933 assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared");
2934 if (cp_ref == NULL) {
2935 return; // robustness
2936 }
2938 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2939 if (cp == NULL) {
2940 // Weak reference has been GC'ed. Since the constant pool has been
2941 // GC'ed, the methods have also been GC'ed.
2942 return;
2943 }
2945 // make the constantPoolOop safe to return
2946 _prev_constant_pool_handle = constantPoolHandle(cp);
2948 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2949 if (method_refs == NULL) {
2950 // the instanceKlass did not have any EMCP methods
2951 return;
2952 }
2954 _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
2956 int n_methods = method_refs->length();
2957 for (int i = 0; i < n_methods; i++) {
2958 jweak method_ref = method_refs->at(i);
2959 assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2960 if (method_ref == NULL) {
2961 continue; // robustness
2962 }
2964 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2965 if (method == NULL) {
2966 // this entry has been GC'ed so skip it
2967 continue;
2968 }
2970 // make the methodOop safe to return
2971 _prev_EMCP_method_handles->append(methodHandle(method));
2972 }
2973 }
2976 // Destroy a PreviousVersionInfo
2977 PreviousVersionInfo::~PreviousVersionInfo() {
2978 // Since _prev_EMCP_method_handles is not C-heap allocated, we
2979 // don't have to delete it.
2980 }
2983 // Construct a helper for walking the previous versions array
2984 PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) {
2985 _previous_versions = ik->previous_versions();
2986 _current_index = 0;
2987 // _hm needs no initialization
2988 _current_p = NULL;
2989 }
2992 // Destroy a PreviousVersionWalker
2993 PreviousVersionWalker::~PreviousVersionWalker() {
2994 // Delete the current info just in case the caller didn't walk to
2995 // the end of the previous versions list. No harm if _current_p is
2996 // already NULL.
2997 delete _current_p;
2999 // When _hm is destroyed, all the Handles returned in
3000 // PreviousVersionInfo objects will be destroyed.
3001 // Also, after this destructor is finished it will be
3002 // safe to delete the GrowableArray allocated in the
3003 // PreviousVersionInfo objects.
3004 }
3007 // Return the interesting information for the next previous version
3008 // of the klass. Returns NULL if there are no more previous versions.
3009 PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
3010 if (_previous_versions == NULL) {
3011 // no previous versions so nothing to return
3012 return NULL;
3013 }
3015 delete _current_p; // cleanup the previous info for the caller
3016 _current_p = NULL; // reset to NULL so we don't delete same object twice
3018 int length = _previous_versions->length();
3020 while (_current_index < length) {
3021 PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
3022 PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP)
3023 PreviousVersionInfo(pv_node);
3025 constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
3026 if (cp_h.is_null()) {
3027 delete pv_info;
3029 // The underlying node's info has been GC'ed so try the next one.
3030 // We don't have to check the methods. If the constant pool has
3031 // been GC'ed, then so have the methods.
3032 continue;
3033 }
3035 // Found a node with non GC'ed info so return it. The caller will
3036 // need to delete pv_info when they are done with it.
3037 _current_p = pv_info;
3038 return pv_info;
3039 }
3041 // all of the underlying nodes' info has been GC'ed
3042 return NULL;
3043 } // end next_previous_version()
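// Illustrative sketch of the intended iteration pattern (hypothetical names;
// assumes the caller runs in the VMThread context that RedefineClasses uses):
//
//   PreviousVersionWalker pvw(ik);
//   for (PreviousVersionInfo* pv_info = pvw.next_previous_version();
//        pv_info != NULL; pv_info = pvw.next_previous_version()) {
//     constantPoolHandle prev_cp = pv_info->prev_constant_pool_handle();
//     // examine prev_cp and the node's EMCP method handles here; the walker
//     // owns pv_info and deletes it on the next call or in its destructor.
//   }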