Wed, 17 Mar 2010 11:01:05 +0100
6935224: Adding new DTrace probes to work with Palantir
Summary: Adding probes related to thread scheduling and class initialization
Reviewed-by: kamg, never
1 /*
2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_instanceKlass.cpp.incl"
28 #ifdef DTRACE_ENABLED
30 HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
31 char*, intptr_t, oop, intptr_t);
32 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
33 char*, intptr_t, oop, intptr_t, int);
34 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
35 char*, intptr_t, oop, intptr_t, int);
36 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
37 char*, intptr_t, oop, intptr_t, int);
38 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
39 char*, intptr_t, oop, intptr_t, int);
40 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
41 char*, intptr_t, oop, intptr_t, int);
42 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
43 char*, intptr_t, oop, intptr_t, int);
44 HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
45 char*, intptr_t, oop, intptr_t, int);
47 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type) \
48 { \
49 char* data = NULL; \
50 int len = 0; \
51 symbolOop name = (clss)->name(); \
52 if (name != NULL) { \
53 data = (char*)name->bytes(); \
54 len = name->utf8_length(); \
55 } \
56 HS_DTRACE_PROBE4(hotspot, class__initialization__##type, \
57 data, len, (clss)->class_loader(), thread_type); \
58 }
60 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
61 { \
62 char* data = NULL; \
63 int len = 0; \
64 symbolOop name = (clss)->name(); \
65 if (name != NULL) { \
66 data = (char*)name->bytes(); \
67 len = name->utf8_length(); \
68 } \
69 HS_DTRACE_PROBE5(hotspot, class__initialization__##type, \
70 data, len, (clss)->class_loader(), thread_type, wait); \
71 }
73 #else // ndef DTRACE_ENABLED
75 #define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)
76 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait)
78 #endif // ndef DTRACE_ENABLED
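A minimal, untested D sketch of how the class-initialization probes declared above can be observed from outside the VM. It assumes only what the declarations show: the "__" in each probe name maps to "-" in the DTrace probe name (standard USDT naming), and arg0/arg1 carry the UTF-8 class-name bytes and length passed by the macros; the script and file name are illustrative.

  /* clinit.d -- illustrative sketch only */
  hotspot$target:::class-initialization-required,
  hotspot$target:::class-initialization-end
  {
      /* arg0 = class name bytes (not NUL-terminated), arg1 = length */
      printf("%s %s\n", probename, copyinstr(arg0, arg1));
  }

Attached to a running VM with something like `dtrace -s clinit.d -p <pid>`.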
80 bool instanceKlass::should_be_initialized() const {
81 return !is_initialized();
82 }
84 klassVtable* instanceKlass::vtable() const {
85 return new klassVtable(as_klassOop(), start_of_vtable(), vtable_length() / vtableEntry::size());
86 }
88 klassItable* instanceKlass::itable() const {
89 return new klassItable(as_klassOop());
90 }
92 void instanceKlass::eager_initialize(Thread *thread) {
93 if (!EagerInitialization) return;
95 if (this->is_not_initialized()) {
96 // abort if the class has a class initializer
97 if (this->class_initializer() != NULL) return;
99 // abort if it is java.lang.Object (initialization is handled in genesis)
100 klassOop super = this->super();
101 if (super == NULL) return;
103 // abort if the super class should be initialized
104 if (!instanceKlass::cast(super)->is_initialized()) return;
106 // call body to expose the this pointer
107 instanceKlassHandle this_oop(thread, this->as_klassOop());
108 eager_initialize_impl(this_oop);
109 }
110 }
113 void instanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
114 EXCEPTION_MARK;
115 ObjectLocker ol(this_oop, THREAD);
117 // abort if someone beat us to the initialization
118 if (!this_oop->is_not_initialized()) return; // note: not equivalent to is_initialized()
120 ClassState old_state = this_oop->_init_state;
121 link_class_impl(this_oop, true, THREAD);
122 if (HAS_PENDING_EXCEPTION) {
123 CLEAR_PENDING_EXCEPTION;
124 // Abort if linking the class throws an exception.
126 // Use a test to avoid redundantly resetting the state if there's
127 // no change. Set_init_state() asserts that state changes make
128 // progress, whereas here we might just be spinning in place.
129 if( old_state != this_oop->_init_state )
130 this_oop->set_init_state (old_state);
131 } else {
132 // linking successful, mark class as initialized
133 this_oop->set_init_state (fully_initialized);
134 // trace
135 if (TraceClassInitialization) {
136 ResourceMark rm(THREAD);
137 tty->print_cr("[Initialized %s without side effects]", this_oop->external_name());
138 }
139 }
140 }
143 // See "The Virtual Machine Specification" section 2.16.5 for a detailed explanation of the class initialization
144 // process. The step comments refer to the procedure described in that section.
145 // Note: implementation moved to static method to expose the this pointer.
146 void instanceKlass::initialize(TRAPS) {
147 if (this->should_be_initialized()) {
148 HandleMark hm(THREAD);
149 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
150 initialize_impl(this_oop, CHECK);
151 // Note: at this point the class may be initialized
152 // OR it may be in the state of being initialized
153 // in case of recursive initialization!
154 } else {
155 assert(is_initialized(), "sanity check");
156 }
157 }
160 bool instanceKlass::verify_code(
161 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
162 // 1) Verify the bytecodes
163 Verifier::Mode mode =
164 throw_verifyerror ? Verifier::ThrowException : Verifier::NoException;
165 return Verifier::verify(this_oop, mode, this_oop->should_verify_class(), CHECK_false);
166 }
169 // Used exclusively by the shared spaces dump mechanism to prevent
170 // classes mapped into the shared regions in new VMs from appearing linked.
172 void instanceKlass::unlink_class() {
173 assert(is_linked(), "must be linked");
174 _init_state = loaded;
175 }
177 void instanceKlass::link_class(TRAPS) {
178 assert(is_loaded(), "must be loaded");
179 if (!is_linked()) {
180 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
181 link_class_impl(this_oop, true, CHECK);
182 }
183 }
185 // Called to verify that a class can link during initialization, without
186 // throwing a VerifyError.
187 bool instanceKlass::link_class_or_fail(TRAPS) {
188 assert(is_loaded(), "must be loaded");
189 if (!is_linked()) {
190 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
191 link_class_impl(this_oop, false, CHECK_false);
192 }
193 return is_linked();
194 }
196 bool instanceKlass::link_class_impl(
197 instanceKlassHandle this_oop, bool throw_verifyerror, TRAPS) {
198 // check for error state
199 if (this_oop->is_in_error_state()) {
200 ResourceMark rm(THREAD);
201 THROW_MSG_(vmSymbols::java_lang_NoClassDefFoundError(),
202 this_oop->external_name(), false);
203 }
204 // return if already verified
205 if (this_oop->is_linked()) {
206 return true;
207 }
209 // Timing
210 // timer handles recursion
211 assert(THREAD->is_Java_thread(), "non-JavaThread in link_class_impl");
212 JavaThread* jt = (JavaThread*)THREAD;
214 // link super class before linking this class
215 instanceKlassHandle super(THREAD, this_oop->super());
216 if (super.not_null()) {
217 if (super->is_interface()) { // check if super class is an interface
218 ResourceMark rm(THREAD);
219 Exceptions::fthrow(
220 THREAD_AND_LOCATION,
221 vmSymbolHandles::java_lang_IncompatibleClassChangeError(),
222 "class %s has interface %s as super class",
223 this_oop->external_name(),
224 super->external_name()
225 );
226 return false;
227 }
229 link_class_impl(super, throw_verifyerror, CHECK_false);
230 }
232 // link all interfaces implemented by this class before linking this class
233 objArrayHandle interfaces (THREAD, this_oop->local_interfaces());
234 int num_interfaces = interfaces->length();
235 for (int index = 0; index < num_interfaces; index++) {
236 HandleMark hm(THREAD);
237 instanceKlassHandle ih(THREAD, klassOop(interfaces->obj_at(index)));
238 link_class_impl(ih, throw_verifyerror, CHECK_false);
239 }
241 // in case the class is linked in the process of linking its superclasses
242 if (this_oop->is_linked()) {
243 return true;
244 }
246 // trace only the link time for this klass that includes
247 // the verification time
248 PerfClassTraceTime vmtimer(ClassLoader::perf_class_link_time(),
249 ClassLoader::perf_class_link_selftime(),
250 ClassLoader::perf_classes_linked(),
251 jt->get_thread_stat()->perf_recursion_counts_addr(),
252 jt->get_thread_stat()->perf_timers_addr(),
253 PerfClassTraceTime::CLASS_LINK);
255 // verification & rewriting
256 {
257 ObjectLocker ol(this_oop, THREAD);
258 // rewritten will have been set if loader constraint error found
259 // on an earlier link attempt
260 // don't verify or rewrite if already rewritten
261 if (!this_oop->is_linked()) {
262 if (!this_oop->is_rewritten()) {
263 {
264 // Timer includes any side effects of class verification (resolution,
265 // etc), but not recursive entry into verify_code().
266 PerfClassTraceTime timer(ClassLoader::perf_class_verify_time(),
267 ClassLoader::perf_class_verify_selftime(),
268 ClassLoader::perf_classes_verified(),
269 jt->get_thread_stat()->perf_recursion_counts_addr(),
270 jt->get_thread_stat()->perf_timers_addr(),
271 PerfClassTraceTime::CLASS_VERIFY);
272 bool verify_ok = verify_code(this_oop, throw_verifyerror, THREAD);
273 if (!verify_ok) {
274 return false;
275 }
276 }
278 // Just in case a side-effect of verify linked this class already
279 // (which can sometimes happen since the verifier loads classes
280 // using custom class loaders, which are free to initialize things)
281 if (this_oop->is_linked()) {
282 return true;
283 }
285 // also sets rewritten
286 this_oop->rewrite_class(CHECK_false);
287 }
289 // Initialize the vtable and interface table after
290 // methods have been rewritten since rewrite may
291 // fabricate new methodOops.
292 // also does loader constraint checking
293 if (!this_oop()->is_shared()) {
294 ResourceMark rm(THREAD);
295 this_oop->vtable()->initialize_vtable(true, CHECK_false);
296 this_oop->itable()->initialize_itable(true, CHECK_false);
297 }
298 #ifdef ASSERT
299 else {
300 ResourceMark rm(THREAD);
301 this_oop->vtable()->verify(tty, true);
302 // In case itable verification is ever added.
303 // this_oop->itable()->verify(tty, true);
304 }
305 #endif
306 this_oop->set_init_state(linked);
307 if (JvmtiExport::should_post_class_prepare()) {
308 Thread *thread = THREAD;
309 assert(thread->is_Java_thread(), "thread->is_Java_thread()");
310 JvmtiExport::post_class_prepare((JavaThread *) thread, this_oop());
311 }
312 }
313 }
314 return true;
315 }
318 // Rewrite the byte codes of all of the methods of a class.
319 // Three cases:
320 // During the link of a newly loaded class.
321 // During the preloading of classes to be written to the shared spaces.
322 // - Rewrite the methods and update the method entry points.
323 //
324 // During the link of a class in the shared spaces.
325 // - The methods were already rewritten, update the method entry points.
326 //
327 // The rewriter must be called exactly once. Rewriting must happen after
328 // verification but before the first method of the class is executed.
330 void instanceKlass::rewrite_class(TRAPS) {
331 assert(is_loaded(), "must be loaded");
332 instanceKlassHandle this_oop(THREAD, this->as_klassOop());
333 if (this_oop->is_rewritten()) {
334 assert(this_oop()->is_shared(), "rewriting an unshared class?");
335 return;
336 }
337 Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
338 this_oop->set_rewritten();
339 }
342 void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
343 // Make sure klass is linked (verified) before initialization
344 // A class could already be verified, since it has been reflected upon.
345 this_oop->link_class(CHECK);
347 DTRACE_CLASSINIT_PROBE(required, instanceKlass::cast(this_oop()), -1);
349 bool wait = false;
351 // refer to the JVM book page 47 for description of steps
352 // Step 1
353 { ObjectLocker ol(this_oop, THREAD);
355 Thread *self = THREAD; // THREAD is the current thread, passed in by the caller
357 // Step 2
358 // If we were to use wait() instead of waitUninterruptibly() then
359 // we might end up throwing IE from link/symbol resolution sites
360 // that aren't expected to throw. This would wreak havoc. See 6320309.
361 while(this_oop->is_being_initialized() && !this_oop->is_reentrant_initialization(self)) {
362 wait = true;
363 ol.waitUninterruptibly(CHECK);
364 }
366 // Step 3
367 if (this_oop->is_being_initialized() && this_oop->is_reentrant_initialization(self)) {
368 DTRACE_CLASSINIT_PROBE_WAIT(recursive, instanceKlass::cast(this_oop()), -1,wait);
369 return;
370 }
372 // Step 4
373 if (this_oop->is_initialized()) {
374 DTRACE_CLASSINIT_PROBE_WAIT(concurrent, instanceKlass::cast(this_oop()), -1,wait);
375 return;
376 }
378 // Step 5
379 if (this_oop->is_in_error_state()) {
380 DTRACE_CLASSINIT_PROBE_WAIT(erroneous, instanceKlass::cast(this_oop()), -1,wait);
381 ResourceMark rm(THREAD);
382 const char* desc = "Could not initialize class ";
383 const char* className = this_oop->external_name();
384 size_t msglen = strlen(desc) + strlen(className) + 1;
385 char* message = NEW_C_HEAP_ARRAY(char, msglen);
386 if (NULL == message) {
387 // Out of memory: can't create detailed error message
388 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
389 } else {
390 jio_snprintf(message, msglen, "%s%s", desc, className);
391 THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
392 }
393 }
395 // Step 6
396 this_oop->set_init_state(being_initialized);
397 this_oop->set_init_thread(self);
398 }
400 // Step 7
401 klassOop super_klass = this_oop->super();
402 if (super_klass != NULL && !this_oop->is_interface() && Klass::cast(super_klass)->should_be_initialized()) {
403 Klass::cast(super_klass)->initialize(THREAD);
405 if (HAS_PENDING_EXCEPTION) {
406 Handle e(THREAD, PENDING_EXCEPTION);
407 CLEAR_PENDING_EXCEPTION;
408 {
409 EXCEPTION_MARK;
410 this_oop->set_initialization_state_and_notify(initialization_error, THREAD); // Locks object, set state, and notify all waiting threads
411 CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, superclass initialization error is thrown below
412 }
413 DTRACE_CLASSINIT_PROBE_WAIT(super__failed, instanceKlass::cast(this_oop()), -1,wait);
414 THROW_OOP(e());
415 }
416 }
418 // Step 8
419 {
420 assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl");
421 JavaThread* jt = (JavaThread*)THREAD;
422 DTRACE_CLASSINIT_PROBE_WAIT(clinit, instanceKlass::cast(this_oop()), -1,wait);
423 // Timer includes any side effects of class initialization (resolution,
424 // etc), but not recursive entry into call_class_initializer().
425 PerfClassTraceTime timer(ClassLoader::perf_class_init_time(),
426 ClassLoader::perf_class_init_selftime(),
427 ClassLoader::perf_classes_inited(),
428 jt->get_thread_stat()->perf_recursion_counts_addr(),
429 jt->get_thread_stat()->perf_timers_addr(),
430 PerfClassTraceTime::CLASS_CLINIT);
431 this_oop->call_class_initializer(THREAD);
432 }
434 // Step 9
435 if (!HAS_PENDING_EXCEPTION) {
436 this_oop->set_initialization_state_and_notify(fully_initialized, CHECK);
437 { ResourceMark rm(THREAD);
438 debug_only(this_oop->vtable()->verify(tty, true);)
439 }
440 }
441 else {
442 // Step 10 and 11
443 Handle e(THREAD, PENDING_EXCEPTION);
444 CLEAR_PENDING_EXCEPTION;
445 {
446 EXCEPTION_MARK;
447 this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
448 CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, class initialization error is thrown below
449 }
450 DTRACE_CLASSINIT_PROBE_WAIT(error, instanceKlass::cast(this_oop()), -1,wait);
451 if (e->is_a(SystemDictionary::Error_klass())) {
452 THROW_OOP(e());
453 } else {
454 JavaCallArguments args(e);
455 THROW_ARG(vmSymbolHandles::java_lang_ExceptionInInitializerError(),
456 vmSymbolHandles::throwable_void_signature(),
457 &args);
458 }
459 }
460 DTRACE_CLASSINIT_PROBE_WAIT(end, instanceKlass::cast(this_oop()), -1,wait);
461 }
464 // Note: implementation moved to static method to expose the this pointer.
465 void instanceKlass::set_initialization_state_and_notify(ClassState state, TRAPS) {
466 instanceKlassHandle kh(THREAD, this->as_klassOop());
467 set_initialization_state_and_notify_impl(kh, state, CHECK);
468 }
470 void instanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
471 ObjectLocker ol(this_oop, THREAD);
472 this_oop->set_init_state(state);
473 ol.notify_all(CHECK);
474 }
476 void instanceKlass::add_implementor(klassOop k) {
477 assert(Compile_lock->owned_by_self(), "");
478 // Filter out my subinterfaces.
479 // (Note: Interfaces are never on the subklass list.)
480 if (instanceKlass::cast(k)->is_interface()) return;
482 // Filter out subclasses whose supers already implement me.
483 // (Note: CHA must walk subclasses of direct implementors
484 // in order to locate indirect implementors.)
485 klassOop sk = instanceKlass::cast(k)->super();
486 if (sk != NULL && instanceKlass::cast(sk)->implements_interface(as_klassOop()))
487 // We only need to check one immediate superclass, since the
488 // implements_interface query looks at transitive_interfaces.
489 // Any supers of the super have the same (or fewer) transitive_interfaces.
490 return;
492 // Update number of implementors
493 int i = _nof_implementors++;
495 // Record this implementor, if there are not too many already
496 if (i < implementors_limit) {
497 assert(_implementors[i] == NULL, "should be exactly one implementor");
498 oop_store_without_check((oop*)&_implementors[i], k);
499 } else if (i == implementors_limit) {
500 // clear out the list on first overflow
501 for (int i2 = 0; i2 < implementors_limit; i2++)
502 oop_store_without_check((oop*)&_implementors[i2], NULL);
503 }
505 // The implementor also implements the transitive_interfaces
506 for (int index = 0; index < local_interfaces()->length(); index++) {
507 instanceKlass::cast(klassOop(local_interfaces()->obj_at(index)))->add_implementor(k);
508 }
509 }
511 void instanceKlass::init_implementor() {
512 for (int i = 0; i < implementors_limit; i++)
513 oop_store_without_check((oop*)&_implementors[i], NULL);
514 _nof_implementors = 0;
515 }
518 void instanceKlass::process_interfaces(Thread *thread) {
519 // link this class into the implementors list of every interface it implements
520 KlassHandle this_as_oop (thread, this->as_klassOop());
521 for (int i = local_interfaces()->length() - 1; i >= 0; i--) {
522 assert(local_interfaces()->obj_at(i)->is_klass(), "must be a klass");
523 instanceKlass* interf = instanceKlass::cast(klassOop(local_interfaces()->obj_at(i)));
524 assert(interf->is_interface(), "expected interface");
525 interf->add_implementor(this_as_oop());
526 }
527 }
529 bool instanceKlass::can_be_primary_super_slow() const {
530 if (is_interface())
531 return false;
532 else
533 return Klass::can_be_primary_super_slow();
534 }
536 objArrayOop instanceKlass::compute_secondary_supers(int num_extra_slots, TRAPS) {
537 // The secondaries are the implemented interfaces.
538 instanceKlass* ik = instanceKlass::cast(as_klassOop());
539 objArrayHandle interfaces (THREAD, ik->transitive_interfaces());
540 int num_secondaries = num_extra_slots + interfaces->length();
541 if (num_secondaries == 0) {
542 return Universe::the_empty_system_obj_array();
543 } else if (num_extra_slots == 0) {
544 return interfaces();
545 } else {
546 // a mix of both
547 objArrayOop secondaries = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
548 for (int i = 0; i < interfaces->length(); i++) {
549 secondaries->obj_at_put(num_extra_slots+i, interfaces->obj_at(i));
550 }
551 return secondaries;
552 }
553 }
555 bool instanceKlass::compute_is_subtype_of(klassOop k) {
556 if (Klass::cast(k)->is_interface()) {
557 return implements_interface(k);
558 } else {
559 return Klass::compute_is_subtype_of(k);
560 }
561 }
563 bool instanceKlass::implements_interface(klassOop k) const {
564 if (as_klassOop() == k) return true;
565 assert(Klass::cast(k)->is_interface(), "should be an interface class");
566 for (int i = 0; i < transitive_interfaces()->length(); i++) {
567 if (transitive_interfaces()->obj_at(i) == k) {
568 return true;
569 }
570 }
571 return false;
572 }
574 objArrayOop instanceKlass::allocate_objArray(int n, int length, TRAPS) {
575 if (length < 0) THROW_0(vmSymbols::java_lang_NegativeArraySizeException());
576 if (length > arrayOopDesc::max_array_length(T_OBJECT)) {
577 report_java_out_of_memory("Requested array size exceeds VM limit");
578 THROW_OOP_0(Universe::out_of_memory_error_array_size());
579 }
580 int size = objArrayOopDesc::object_size(length);
581 klassOop ak = array_klass(n, CHECK_NULL);
582 KlassHandle h_ak (THREAD, ak);
583 objArrayOop o =
584 (objArrayOop)CollectedHeap::array_allocate(h_ak, size, length, CHECK_NULL);
585 return o;
586 }
588 instanceOop instanceKlass::register_finalizer(instanceOop i, TRAPS) {
589 if (TraceFinalizerRegistration) {
590 tty->print("Registered ");
591 i->print_value_on(tty);
592 tty->print_cr(" (" INTPTR_FORMAT ") as finalizable", (address)i);
593 }
594 instanceHandle h_i(THREAD, i);
595 // Pass the handle as an argument; JavaCalls::call expects oops as jobjects
596 JavaValue result(T_VOID);
597 JavaCallArguments args(h_i);
598 methodHandle mh (THREAD, Universe::finalizer_register_method());
599 JavaCalls::call(&result, mh, &args, CHECK_NULL);
600 return h_i();
601 }
603 instanceOop instanceKlass::allocate_instance(TRAPS) {
604 bool has_finalizer_flag = has_finalizer(); // Query before possible GC
605 int size = size_helper(); // Query before forming handle.
607 KlassHandle h_k(THREAD, as_klassOop());
609 instanceOop i;
611 i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
612 if (has_finalizer_flag && !RegisterFinalizersAtInit) {
613 i = register_finalizer(i, CHECK_NULL);
614 }
615 return i;
616 }
618 instanceOop instanceKlass::allocate_permanent_instance(TRAPS) {
619 // Finalizer registration occurs in the Object.<init> constructor
620 // and constructors normally aren't run when allocating perm
621 // instances so simply disallow finalizable perm objects. This can
622 // be relaxed if a need for it is found.
623 assert(!has_finalizer(), "perm objects not allowed to have finalizers");
624 int size = size_helper(); // Query before forming handle.
625 KlassHandle h_k(THREAD, as_klassOop());
626 instanceOop i = (instanceOop)
627 CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
628 return i;
629 }
631 void instanceKlass::check_valid_for_instantiation(bool throwError, TRAPS) {
632 if (is_interface() || is_abstract()) {
633 ResourceMark rm(THREAD);
634 THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
635 : vmSymbols::java_lang_InstantiationException(), external_name());
636 }
637 if (as_klassOop() == SystemDictionary::Class_klass()) {
638 ResourceMark rm(THREAD);
639 THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
640 : vmSymbols::java_lang_IllegalAccessException(), external_name());
641 }
642 }
644 klassOop instanceKlass::array_klass_impl(bool or_null, int n, TRAPS) {
645 instanceKlassHandle this_oop(THREAD, as_klassOop());
646 return array_klass_impl(this_oop, or_null, n, THREAD);
647 }
649 klassOop instanceKlass::array_klass_impl(instanceKlassHandle this_oop, bool or_null, int n, TRAPS) {
650 if (this_oop->array_klasses() == NULL) {
651 if (or_null) return NULL;
653 ResourceMark rm;
654 JavaThread *jt = (JavaThread *)THREAD;
655 {
656 // Atomic creation of array_klasses
657 MutexLocker mc(Compile_lock, THREAD); // for vtables
658 MutexLocker ma(MultiArray_lock, THREAD);
660 // Check if update has already taken place
661 if (this_oop->array_klasses() == NULL) {
662 objArrayKlassKlass* oakk =
663 (objArrayKlassKlass*)Universe::objArrayKlassKlassObj()->klass_part();
665 klassOop k = oakk->allocate_objArray_klass(1, this_oop, CHECK_NULL);
666 this_oop->set_array_klasses(k);
667 }
668 }
669 }
670 // array_klasses() will always be non-NULL at this point
671 objArrayKlass* oak = (objArrayKlass*)this_oop->array_klasses()->klass_part();
672 if (or_null) {
673 return oak->array_klass_or_null(n);
674 }
675 return oak->array_klass(n, CHECK_NULL);
676 }
678 klassOop instanceKlass::array_klass_impl(bool or_null, TRAPS) {
679 return array_klass_impl(or_null, 1, THREAD);
680 }
682 void instanceKlass::call_class_initializer(TRAPS) {
683 instanceKlassHandle ik (THREAD, as_klassOop());
684 call_class_initializer_impl(ik, THREAD);
685 }
687 static int call_class_initializer_impl_counter = 0; // for debugging
689 methodOop instanceKlass::class_initializer() {
690 return find_method(vmSymbols::class_initializer_name(), vmSymbols::void_method_signature());
691 }
693 void instanceKlass::call_class_initializer_impl(instanceKlassHandle this_oop, TRAPS) {
694 methodHandle h_method(THREAD, this_oop->class_initializer());
695 assert(!this_oop->is_initialized(), "we cannot initialize twice");
696 if (TraceClassInitialization) {
697 tty->print("%d Initializing ", call_class_initializer_impl_counter++);
698 this_oop->name()->print_value();
699 tty->print_cr("%s (" INTPTR_FORMAT ")", h_method() == NULL ? "(no method)" : "", (address)this_oop());
700 }
701 if (h_method() != NULL) {
702 JavaCallArguments args; // No arguments
703 JavaValue result(T_VOID);
704 JavaCalls::call(&result, h_method, &args, CHECK); // Static call (no args)
705 }
706 }
709 void instanceKlass::mask_for(methodHandle method, int bci,
710 InterpreterOopMap* entry_for) {
711 // Dirty read, then double-check under a lock.
712 if (_oop_map_cache == NULL) {
713 // Otherwise, allocate a new one.
714 MutexLocker x(OopMapCacheAlloc_lock);
715 // First time use. Allocate a cache in C heap
716 if (_oop_map_cache == NULL) {
717 _oop_map_cache = new OopMapCache();
718 }
719 }
720 // _oop_map_cache is constant after init; lookup below does its own locking.
721 _oop_map_cache->lookup(method, bci, entry_for);
722 }
725 bool instanceKlass::find_local_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
726 const int n = fields()->length();
727 for (int i = 0; i < n; i += next_offset ) {
728 int name_index = fields()->ushort_at(i + name_index_offset);
729 int sig_index = fields()->ushort_at(i + signature_index_offset);
730 symbolOop f_name = constants()->symbol_at(name_index);
731 symbolOop f_sig = constants()->symbol_at(sig_index);
732 if (f_name == name && f_sig == sig) {
733 fd->initialize(as_klassOop(), i);
734 return true;
735 }
736 }
737 return false;
738 }
741 void instanceKlass::field_names_and_sigs_iterate(OopClosure* closure) {
742 const int n = fields()->length();
743 for (int i = 0; i < n; i += next_offset ) {
744 int name_index = fields()->ushort_at(i + name_index_offset);
745 symbolOop name = constants()->symbol_at(name_index);
746 closure->do_oop((oop*)&name);
748 int sig_index = fields()->ushort_at(i + signature_index_offset);
749 symbolOop sig = constants()->symbol_at(sig_index);
750 closure->do_oop((oop*)&sig);
751 }
752 }
755 klassOop instanceKlass::find_interface_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
756 const int n = local_interfaces()->length();
757 for (int i = 0; i < n; i++) {
758 klassOop intf1 = klassOop(local_interfaces()->obj_at(i));
759 assert(Klass::cast(intf1)->is_interface(), "just checking type");
760 // search for field in current interface
761 if (instanceKlass::cast(intf1)->find_local_field(name, sig, fd)) {
762 assert(fd->is_static(), "interface field must be static");
763 return intf1;
764 }
765 // search for field in direct superinterfaces
766 klassOop intf2 = instanceKlass::cast(intf1)->find_interface_field(name, sig, fd);
767 if (intf2 != NULL) return intf2;
768 }
769 // otherwise field lookup fails
770 return NULL;
771 }
774 klassOop instanceKlass::find_field(symbolOop name, symbolOop sig, fieldDescriptor* fd) const {
775 // search order according to newest JVM spec (5.4.3.2, p.167).
776 // 1) search for field in current klass
777 if (find_local_field(name, sig, fd)) {
778 return as_klassOop();
779 }
780 // 2) search for field recursively in direct superinterfaces
781 { klassOop intf = find_interface_field(name, sig, fd);
782 if (intf != NULL) return intf;
783 }
784 // 3) apply field lookup recursively if superclass exists
785 { klassOop supr = super();
786 if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, fd);
787 }
788 // 4) otherwise field lookup fails
789 return NULL;
790 }
793 klassOop instanceKlass::find_field(symbolOop name, symbolOop sig, bool is_static, fieldDescriptor* fd) const {
794 // search order according to newest JVM spec (5.4.3.2, p.167).
795 // 1) search for field in current klass
796 if (find_local_field(name, sig, fd)) {
797 if (fd->is_static() == is_static) return as_klassOop();
798 }
799 // 2) search for field recursively in direct superinterfaces
800 if (is_static) {
801 klassOop intf = find_interface_field(name, sig, fd);
802 if (intf != NULL) return intf;
803 }
804 // 3) apply field lookup recursively if superclass exists
805 { klassOop supr = super();
806 if (supr != NULL) return instanceKlass::cast(supr)->find_field(name, sig, is_static, fd);
807 }
808 // 4) otherwise field lookup fails
809 return NULL;
810 }
813 bool instanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
814 int length = fields()->length();
815 for (int i = 0; i < length; i += next_offset) {
816 if (offset_from_fields( i ) == offset) {
817 fd->initialize(as_klassOop(), i);
818 if (fd->is_static() == is_static) return true;
819 }
820 }
821 return false;
822 }
825 bool instanceKlass::find_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
826 klassOop klass = as_klassOop();
827 while (klass != NULL) {
828 if (instanceKlass::cast(klass)->find_local_field_from_offset(offset, is_static, fd)) {
829 return true;
830 }
831 klass = Klass::cast(klass)->super();
832 }
833 return false;
834 }
837 void instanceKlass::methods_do(void f(methodOop method)) {
838 int len = methods()->length();
839 for (int index = 0; index < len; index++) {
840 methodOop m = methodOop(methods()->obj_at(index));
841 assert(m->is_method(), "must be method");
842 f(m);
843 }
844 }
846 void instanceKlass::do_local_static_fields(FieldClosure* cl) {
847 fieldDescriptor fd;
848 int length = fields()->length();
849 for (int i = 0; i < length; i += next_offset) {
850 fd.initialize(as_klassOop(), i);
851 if (fd.is_static()) cl->do_field(&fd);
852 }
853 }
856 void instanceKlass::do_local_static_fields(void f(fieldDescriptor*, TRAPS), TRAPS) {
857 instanceKlassHandle h_this(THREAD, as_klassOop());
858 do_local_static_fields_impl(h_this, f, CHECK);
859 }
862 void instanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
863 fieldDescriptor fd;
864 int length = this_oop->fields()->length();
865 for (int i = 0; i < length; i += next_offset) {
866 fd.initialize(this_oop(), i);
867 if (fd.is_static()) { f(&fd, CHECK); } // Do NOT remove {}! (CHECK macro expands into several statements)
868 }
869 }
872 static int compare_fields_by_offset(int* a, int* b) {
873 return a[0] - b[0];
874 }
876 void instanceKlass::do_nonstatic_fields(FieldClosure* cl) {
877 instanceKlass* super = superklass();
878 if (super != NULL) {
879 super->do_nonstatic_fields(cl);
880 }
881 fieldDescriptor fd;
882 int length = fields()->length();
883 // In DebugInfo nonstatic fields are sorted by offset.
884 int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1));
885 int j = 0;
886 for (int i = 0; i < length; i += next_offset) {
887 fd.initialize(as_klassOop(), i);
888 if (!fd.is_static()) {
889 fields_sorted[j + 0] = fd.offset();
890 fields_sorted[j + 1] = i;
891 j += 2;
892 }
893 }
894 if (j > 0) {
895 length = j;
896 // _sort_Fn is defined in growableArray.hpp.
897 qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
898 for (int i = 0; i < length; i += 2) {
899 fd.initialize(as_klassOop(), fields_sorted[i + 1]);
900 assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
901 cl->do_field(&fd);
902 }
903 }
904 FREE_C_HEAP_ARRAY(int, fields_sorted);
905 }
908 void instanceKlass::array_klasses_do(void f(klassOop k)) {
909 if (array_klasses() != NULL)
910 arrayKlass::cast(array_klasses())->array_klasses_do(f);
911 }
914 void instanceKlass::with_array_klasses_do(void f(klassOop k)) {
915 f(as_klassOop());
916 array_klasses_do(f);
917 }
919 #ifdef ASSERT
920 static int linear_search(objArrayOop methods, symbolOop name, symbolOop signature) {
921 int len = methods->length();
922 for (int index = 0; index < len; index++) {
923 methodOop m = (methodOop)(methods->obj_at(index));
924 assert(m->is_method(), "must be method");
925 if (m->signature() == signature && m->name() == name) {
926 return index;
927 }
928 }
929 return -1;
930 }
931 #endif
933 methodOop instanceKlass::find_method(symbolOop name, symbolOop signature) const {
934 return instanceKlass::find_method(methods(), name, signature);
935 }
937 methodOop instanceKlass::find_method(objArrayOop methods, symbolOop name, symbolOop signature) {
938 int len = methods->length();
939 // methods are sorted, so do binary search
940 int l = 0;
941 int h = len - 1;
942 while (l <= h) {
943 int mid = (l + h) >> 1;
944 methodOop m = (methodOop)methods->obj_at(mid);
945 assert(m->is_method(), "must be method");
946 int res = m->name()->fast_compare(name);
947 if (res == 0) {
948 // found matching name; do linear search to find matching signature
949 // first, quick check for common case
950 if (m->signature() == signature) return m;
951 // search downwards through overloaded methods
952 int i;
953 for (i = mid - 1; i >= l; i--) {
954 methodOop m = (methodOop)methods->obj_at(i);
955 assert(m->is_method(), "must be method");
956 if (m->name() != name) break;
957 if (m->signature() == signature) return m;
958 }
959 // search upwards
960 for (i = mid + 1; i <= h; i++) {
961 methodOop m = (methodOop)methods->obj_at(i);
962 assert(m->is_method(), "must be method");
963 if (m->name() != name) break;
964 if (m->signature() == signature) return m;
965 }
966 // not found
967 #ifdef ASSERT
968 int index = linear_search(methods, name, signature);
969 if (index != -1) fatal1("binary search bug: should have found entry %d", index);
970 #endif
971 return NULL;
972 } else if (res < 0) {
973 l = mid + 1;
974 } else {
975 h = mid - 1;
976 }
977 }
978 #ifdef ASSERT
979 int index = linear_search(methods, name, signature);
980 if (index != -1) fatal1("binary search bug: should have found entry %d", index);
981 #endif
982 return NULL;
983 }
985 methodOop instanceKlass::uncached_lookup_method(symbolOop name, symbolOop signature) const {
986 klassOop klass = as_klassOop();
987 while (klass != NULL) {
988 methodOop method = instanceKlass::cast(klass)->find_method(name, signature);
989 if (method != NULL) return method;
990 klass = instanceKlass::cast(klass)->super();
991 }
992 return NULL;
993 }
995 // lookup a method in all the interfaces that this class implements
996 methodOop instanceKlass::lookup_method_in_all_interfaces(symbolOop name,
997 symbolOop signature) const {
998 objArrayOop all_ifs = instanceKlass::cast(as_klassOop())->transitive_interfaces();
999 int num_ifs = all_ifs->length();
1000 instanceKlass *ik = NULL;
1001 for (int i = 0; i < num_ifs; i++) {
1002 ik = instanceKlass::cast(klassOop(all_ifs->obj_at(i)));
1003 methodOop m = ik->lookup_method(name, signature);
1004 if (m != NULL) {
1005 return m;
1006 }
1007 }
1008 return NULL;
1009 }
1011 /* jni_id_for_impl for jfieldIds only */
1012 JNIid* instanceKlass::jni_id_for_impl(instanceKlassHandle this_oop, int offset) {
1013 MutexLocker ml(JfieldIdCreation_lock);
1014 // Retry lookup after we got the lock
1015 JNIid* probe = this_oop->jni_ids() == NULL ? NULL : this_oop->jni_ids()->find(offset);
1016 if (probe == NULL) {
1017 // Slow case, allocate new static field identifier
1018 probe = new JNIid(this_oop->as_klassOop(), offset, this_oop->jni_ids());
1019 this_oop->set_jni_ids(probe);
1020 }
1021 return probe;
1022 }
1025 /* jni_id_for for jfieldIds only */
1026 JNIid* instanceKlass::jni_id_for(int offset) {
1027 JNIid* probe = jni_ids() == NULL ? NULL : jni_ids()->find(offset);
1028 if (probe == NULL) {
1029 probe = jni_id_for_impl(this->as_klassOop(), offset);
1030 }
1031 return probe;
1032 }
1035 // Lookup or create a jmethodID.
1036 // This code is called by the VMThread and JavaThreads so the
1037 // locking has to be done very carefully to avoid deadlocks
1038 // and/or other cache consistency problems.
1039 //
1040 jmethodID instanceKlass::get_jmethod_id(instanceKlassHandle ik_h, methodHandle method_h) {
1041 size_t idnum = (size_t)method_h->method_idnum();
1042 jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1043 size_t length = 0;
1044 jmethodID id = NULL;
1046 // We use a double-check locking idiom here because this cache is
1047 // performance sensitive. In the normal system, this cache only
1048 // transitions from NULL to non-NULL which is safe because we use
1049 // release_set_methods_jmethod_ids() to advertise the new cache.
1050 // A partially constructed cache should never be seen by a racing
1051 // thread. We also use release_store_ptr() to save a new jmethodID
1052 // in the cache so a partially constructed jmethodID should never be
1053 // seen either. Cache reads of existing jmethodIDs proceed without a
1054 // lock, but cache writes of a new jmethodID require uniqueness and
1055 // creation of the cache itself requires no leaks so a lock is
1056 // generally acquired in those two cases.
1057 //
1058 // If the RedefineClasses() API has been used, then this cache can
1059 // grow and we'll have transitions from non-NULL to bigger non-NULL.
1060 // Cache creation requires no leaks and we require safety between all
1061 // cache accesses and freeing of the old cache so a lock is generally
1062 // acquired when the RedefineClasses() API has been used.
1064 if (jmeths != NULL) {
1065 // the cache already exists
1066 if (!ik_h->idnum_can_increment()) {
1067 // the cache can't grow so we can just get the current values
1068 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1069 } else {
1070 // cache can grow so we have to be more careful
1071 if (Threads::number_of_threads() == 0 ||
1072 SafepointSynchronize::is_at_safepoint()) {
1073 // we're single threaded or at a safepoint - no locking needed
1074 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1075 } else {
1076 MutexLocker ml(JmethodIdCreation_lock);
1077 get_jmethod_id_length_value(jmeths, idnum, &length, &id);
1078 }
1079 }
1080 }
1081 // implied else:
1082 // we need to allocate a cache so default length and id values are good
1084 if (jmeths == NULL || // no cache yet
1085 length <= idnum || // cache is too short
1086 id == NULL) { // cache doesn't contain entry
1088 // This function can be called by the VMThread so we have to do all
1089 // things that might block on a safepoint before grabbing the lock.
1090 // Otherwise, we can deadlock with the VMThread or have a cache
1091 // consistency issue. These vars keep track of what we might have
1092 // to free after the lock is dropped.
1093 jmethodID to_dealloc_id = NULL;
1094 jmethodID* to_dealloc_jmeths = NULL;
1096 // we may not end up allocating new_jmeths, or using it even if we do allocate it
1097 jmethodID* new_jmeths = NULL;
1098 if (length <= idnum) {
1099 // allocate a new cache that might be used
1100 size_t size = MAX2(idnum+1, (size_t)ik_h->idnum_allocated_count());
1101 new_jmeths = NEW_C_HEAP_ARRAY(jmethodID, size+1);
1102 memset(new_jmeths, 0, (size+1)*sizeof(jmethodID));
1103 // cache size is stored in element[0], other elements offset by one
1104 new_jmeths[0] = (jmethodID)size;
1105 }
1107 // allocate a new jmethodID that might be used
1108 jmethodID new_id = NULL;
1109 if (method_h->is_old() && !method_h->is_obsolete()) {
1110 // The method passed in is old (but not obsolete); we need to use the current version
1111 methodOop current_method = ik_h->method_with_idnum((int)idnum);
1112 assert(current_method != NULL, "old but not obsolete, so should exist");
1113 methodHandle current_method_h(current_method == NULL? method_h() : current_method);
1114 new_id = JNIHandles::make_jmethod_id(current_method_h);
1115 } else {
1116 // It is the current version of the method or an obsolete method,
1117 // use the version passed in
1118 new_id = JNIHandles::make_jmethod_id(method_h);
1119 }
1121 if (Threads::number_of_threads() == 0 ||
1122 SafepointSynchronize::is_at_safepoint()) {
1123 // we're single threaded or at a safepoint - no locking needed
1124 id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1125 &to_dealloc_id, &to_dealloc_jmeths);
1126 } else {
1127 MutexLocker ml(JmethodIdCreation_lock);
1128 id = get_jmethod_id_fetch_or_update(ik_h, idnum, new_id, new_jmeths,
1129 &to_dealloc_id, &to_dealloc_jmeths);
1130 }
1132 // The lock has been dropped so we can free resources.
1133 // Free up either the old cache or the new cache if we allocated one.
1134 if (to_dealloc_jmeths != NULL) {
1135 FreeHeap(to_dealloc_jmeths);
1136 }
1137 // free up the new ID since it wasn't needed
1138 if (to_dealloc_id != NULL) {
1139 JNIHandles::destroy_jmethod_id(to_dealloc_id);
1140 }
1141 }
1142 return id;
1143 }
1146 // Common code to fetch the jmethodID from the cache or update the
1147 // cache with the new jmethodID. This function should never do anything
1148 // that causes the caller to go to a safepoint or we can deadlock with
1149 // the VMThread or have cache consistency issues.
1150 //
1151 jmethodID instanceKlass::get_jmethod_id_fetch_or_update(
1152 instanceKlassHandle ik_h, size_t idnum, jmethodID new_id,
1153 jmethodID* new_jmeths, jmethodID* to_dealloc_id_p,
1154 jmethodID** to_dealloc_jmeths_p) {
1155 assert(new_id != NULL, "sanity check");
1156 assert(to_dealloc_id_p != NULL, "sanity check");
1157 assert(to_dealloc_jmeths_p != NULL, "sanity check");
1158 assert(Threads::number_of_threads() == 0 ||
1159 SafepointSynchronize::is_at_safepoint() ||
1160 JmethodIdCreation_lock->owned_by_self(), "sanity check");
1162 // reacquire the cache - we are locked, single threaded or at a safepoint
1163 jmethodID* jmeths = ik_h->methods_jmethod_ids_acquire();
1164 jmethodID id = NULL;
1165 size_t length = 0;
1167 if (jmeths == NULL || // no cache yet
1168 (length = (size_t)jmeths[0]) <= idnum) { // cache is too short
1169 if (jmeths != NULL) {
1170 // copy any existing entries from the old cache
1171 for (size_t index = 0; index < length; index++) {
1172 new_jmeths[index+1] = jmeths[index+1];
1173 }
1174 *to_dealloc_jmeths_p = jmeths; // save old cache for later delete
1175 }
1176 ik_h->release_set_methods_jmethod_ids(jmeths = new_jmeths);
1177 } else {
1178 // fetch jmethodID (if any) from the existing cache
1179 id = jmeths[idnum+1];
1180 *to_dealloc_jmeths_p = new_jmeths; // save new cache for later delete
1181 }
1182 if (id == NULL) {
1183 // No matching jmethodID in the existing cache or we have a new
1184 // cache or we just grew the cache. This cache write is done here
1185 // by the first thread to win the foot race because a jmethodID
1186 // needs to be unique once it is generally available.
1187 id = new_id;
1189 // The jmethodID cache can be read while unlocked so we have to
1190 // make sure the new jmethodID is complete before installing it
1191 // in the cache.
1192 OrderAccess::release_store_ptr(&jmeths[idnum+1], id);
1193 } else {
1194 *to_dealloc_id_p = new_id; // save new id for later delete
1195 }
1196 return id;
1197 }
1200 // Common code to get the jmethodID cache length and the jmethodID
1201 // value at index idnum if there is one.
1202 //
1203 void instanceKlass::get_jmethod_id_length_value(jmethodID* cache,
1204 size_t idnum, size_t *length_p, jmethodID* id_p) {
1205 assert(cache != NULL, "sanity check");
1206 assert(length_p != NULL, "sanity check");
1207 assert(id_p != NULL, "sanity check");
1209 // cache size is stored in element[0], other elements offset by one
1210 *length_p = (size_t)cache[0];
1211 if (*length_p <= idnum) { // cache is too short
1212 *id_p = NULL;
1213 } else {
1214 *id_p = cache[idnum+1]; // fetch jmethodID (if any)
1215 }
1216 }
1219 // Lookup a jmethodID, NULL if not found. Do no blocking, no allocations, no handles
1220 jmethodID instanceKlass::jmethod_id_or_null(methodOop method) {
1221 size_t idnum = (size_t)method->method_idnum();
1222 jmethodID* jmeths = methods_jmethod_ids_acquire();
1223 size_t length; // length assigned as debugging crumb
1224 jmethodID id = NULL;
1225 if (jmeths != NULL && // If there is a cache
1226 (length = (size_t)jmeths[0]) > idnum) { // and if it is long enough,
1227 id = jmeths[idnum+1]; // Look up the id (may be NULL)
1228 }
1229 return id;
1230 }
1233 // Cache an itable index
1234 void instanceKlass::set_cached_itable_index(size_t idnum, int index) {
1235 int* indices = methods_cached_itable_indices_acquire();
1236 int* to_dealloc_indices = NULL;
1238 // We use a double-check locking idiom here because this cache is
1239 // performance sensitive. In the normal system, this cache only
1240 // transitions from NULL to non-NULL which is safe because we use
1241 // release_set_methods_cached_itable_indices() to advertise the
1242 // new cache. A partially constructed cache should never be seen
1243 // by a racing thread. Cache reads and writes proceed without a
1244 // lock, but creation of the cache itself requires no leaks so a
1245 // lock is generally acquired in that case.
1246 //
1247 // If the RedefineClasses() API has been used, then this cache can
1248 // grow and we'll have transitions from non-NULL to bigger non-NULL.
1249 // Cache creation requires no leaks and we require safety between all
1250 // cache accesses and freeing of the old cache so a lock is generally
1251 // acquired when the RedefineClasses() API has been used.
1253 if (indices == NULL || idnum_can_increment()) {
1254 // we need a cache or the cache can grow
1255 MutexLocker ml(JNICachedItableIndex_lock);
1256 // reacquire the cache to see if another thread already did the work
1257 indices = methods_cached_itable_indices_acquire();
1258 size_t length = 0;
1259 // cache size is stored in element[0], other elements offset by one
1260 if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
1261 size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
1262 int* new_indices = NEW_C_HEAP_ARRAY(int, size+1);
1263 new_indices[0] = (int)size;
1264 // copy any existing entries
1265 size_t i;
1266 for (i = 0; i < length; i++) {
1267 new_indices[i+1] = indices[i+1];
1268 }
1269 // Set all the rest to -1
1270 for (i = length; i < size; i++) {
1271 new_indices[i+1] = -1;
1272 }
1273 if (indices != NULL) {
1274 // We have an old cache to delete so save it for after we
1275 // drop the lock.
1276 to_dealloc_indices = indices;
1277 }
1278 release_set_methods_cached_itable_indices(indices = new_indices);
1279 }
1281 if (idnum_can_increment()) {
1282 // this cache can grow so we have to write to it safely
1283 indices[idnum+1] = index;
1284 }
1285 } else {
1286 CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
1287 }
1289 if (!idnum_can_increment()) {
1290 // The cache cannot grow and this JNI itable index value does not
1291 // have to be unique like a jmethodID. If there is a race to set it,
1292 // it doesn't matter.
1293 indices[idnum+1] = index;
1294 }
1296 if (to_dealloc_indices != NULL) {
1297 // we allocated a new cache so free the old one
1298 FreeHeap(to_dealloc_indices);
1299 }
1300 }
1303 // Retrieve a cached itable index
1304 int instanceKlass::cached_itable_index(size_t idnum) {
1305 int* indices = methods_cached_itable_indices_acquire();
1306 if (indices != NULL && ((size_t)indices[0]) > idnum) {
1307 // indices exist and are long enough, retrieve the possibly cached itable index
1308 return indices[idnum+1];
1309 }
1310 return -1;
1311 }
1314 //
1315 // nmethodBucket is used to record dependent nmethods for
1316 // deoptimization. nmethod dependencies are actually <klass, method>
1317 // pairs but we really only care about the klass part for purposes of
1318 // finding nmethods which might need to be deoptimized. Instead of
1319 // recording the method, a count of how many times a particular nmethod
1320 // was recorded is kept. This ensures that any recording errors are
1321 // noticed since an nmethod should be removed as many times as it is
1322 // added.
1323 //
1324 class nmethodBucket {
1325 private:
1326 nmethod* _nmethod;
1327 int _count;
1328 nmethodBucket* _next;
1330 public:
1331 nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
1332 _nmethod = nmethod;
1333 _next = next;
1334 _count = 1;
1335 }
1336 int count() { return _count; }
1337 int increment() { _count += 1; return _count; }
1338 int decrement() { _count -= 1; assert(_count >= 0, "don't underflow"); return _count; }
1339 nmethodBucket* next() { return _next; }
1340 void set_next(nmethodBucket* b) { _next = b; }
1341 nmethod* get_nmethod() { return _nmethod; }
1342 };
1345 //
1346 // Walk the list of dependent nmethods searching for nmethods which
1347 // are dependent on the klassOop that was passed in and mark them for
1348 // deoptimization. Returns the number of nmethods found.
1349 //
1350 int instanceKlass::mark_dependent_nmethods(DepChange& changes) {
1351 assert_locked_or_safepoint(CodeCache_lock);
1352 int found = 0;
1353 nmethodBucket* b = _dependencies;
1354 while (b != NULL) {
1355 nmethod* nm = b->get_nmethod();
1356 // since dependencies aren't removed until an nmethod becomes a zombie,
1357 // the dependency list may contain nmethods which aren't alive.
1358 if (nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
1359 if (TraceDependencies) {
1360 ResourceMark rm;
1361 tty->print_cr("Marked for deoptimization");
1362 tty->print_cr(" context = %s", this->external_name());
1363 changes.print();
1364 nm->print();
1365 nm->print_dependencies();
1366 }
1367 nm->mark_for_deoptimization();
1368 found++;
1369 }
1370 b = b->next();
1371 }
1372 return found;
1373 }
1376 //
1377 // Add an nmethodBucket for this nmethod to this klass's list of dependencies.
1378 // It's possible that an nmethod has multiple dependencies on this klass
1379 // so a count is kept for each bucket to guarantee that creation and
1380 // deletion of dependencies is consistent.
1381 //
1382 void instanceKlass::add_dependent_nmethod(nmethod* nm) {
1383 assert_locked_or_safepoint(CodeCache_lock);
1384 nmethodBucket* b = _dependencies;
1385 nmethodBucket* last = NULL;
1386 while (b != NULL) {
1387 if (nm == b->get_nmethod()) {
1388 b->increment();
1389 return;
1390 }
1391 b = b->next();
1392 }
1393 _dependencies = new nmethodBucket(nm, _dependencies);
1394 }
1397 //
1398 // Decrement count of the nmethod in the dependency list and remove
1399 // the bucket completely when the count goes to 0. This method must
1400 // find a corresponding bucket otherwise there's a bug in the
1401 // recording of dependencies.
1402 //
1403 void instanceKlass::remove_dependent_nmethod(nmethod* nm) {
1404 assert_locked_or_safepoint(CodeCache_lock);
1405 nmethodBucket* b = _dependencies;
1406 nmethodBucket* last = NULL;
1407 while (b != NULL) {
1408 if (nm == b->get_nmethod()) {
1409 if (b->decrement() == 0) {
1410 if (last == NULL) {
1411 _dependencies = b->next();
1412 } else {
1413 last->set_next(b->next());
1414 }
1415 delete b;
1416 }
1417 return;
1418 }
1419 last = b;
1420 b = b->next();
1421 }
1422 #ifdef ASSERT
1423 tty->print_cr("### %s can't find dependent nmethod:", this->external_name());
1424 nm->print();
1425 #endif // ASSERT
1426 ShouldNotReachHere();
1427 }
1430 #ifndef PRODUCT
1431 void instanceKlass::print_dependent_nmethods(bool verbose) {
1432 nmethodBucket* b = _dependencies;
1433 int idx = 0;
1434 while (b != NULL) {
1435 nmethod* nm = b->get_nmethod();
1436 tty->print("[%d] count=%d { ", idx++, b->count());
1437 if (!verbose) {
1438 nm->print_on(tty, "nmethod");
1439 tty->print_cr(" } ");
1440 } else {
1441 nm->print();
1442 nm->print_dependencies();
1443 tty->print_cr("--- } ");
1444 }
1445 b = b->next();
1446 }
1447 }
1450 bool instanceKlass::is_dependent_nmethod(nmethod* nm) {
1451 nmethodBucket* b = _dependencies;
1452 while (b != NULL) {
1453 if (nm == b->get_nmethod()) {
1454 return true;
1455 }
1456 b = b->next();
1457 }
1458 return false;
1459 }
1460 #endif //PRODUCT
1463 #ifdef ASSERT
1464 template <class T> void assert_is_in(T *p) {
1465 T heap_oop = oopDesc::load_heap_oop(p);
1466 if (!oopDesc::is_null(heap_oop)) {
1467 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1468 assert(Universe::heap()->is_in(o), "should be in heap");
1469 }
1470 }
1471 template <class T> void assert_is_in_closed_subset(T *p) {
1472 T heap_oop = oopDesc::load_heap_oop(p);
1473 if (!oopDesc::is_null(heap_oop)) {
1474 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1475 assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
1476 }
1477 }
1478 template <class T> void assert_is_in_reserved(T *p) {
1479 T heap_oop = oopDesc::load_heap_oop(p);
1480 if (!oopDesc::is_null(heap_oop)) {
1481 oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
1482 assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
1483 }
1484 }
1485 template <class T> void assert_nothing(T *p) {}
1487 #else
1488 template <class T> void assert_is_in(T *p) {}
1489 template <class T> void assert_is_in_closed_subset(T *p) {}
1490 template <class T> void assert_is_in_reserved(T *p) {}
1491 template <class T> void assert_nothing(T *p) {}
1492 #endif // ASSERT
1494 //
1495 // Macros that iterate over areas of oops which are specialized on type of
1496 // oop pointer either narrow or wide, depending on UseCompressedOops
1497 //
1498 // Parameters are:
1499 // T - type of oop to point to (either oop or narrowOop)
1500 // start_p - starting pointer for region to iterate over
1501 // count - number of oops or narrowOops to iterate over
1502 // do_oop - action to perform on each oop (it's arbitrary C code which
1503 // makes it more efficient to put in a macro rather than making
1504 // it a template function)
1505 // assert_fn - assert function which is template function because performance
1506 // doesn't matter when enabled.
1507 #define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
1508 T, start_p, count, do_oop, \
1509 assert_fn) \
1510 { \
1511 T* p = (T*)(start_p); \
1512 T* const end = p + (count); \
1513 while (p < end) { \
1514 (assert_fn)(p); \
1515 do_oop; \
1516 ++p; \
1517 } \
1518 }
1520 #define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
1521 T, start_p, count, do_oop, \
1522 assert_fn) \
1523 { \
1524 T* const start = (T*)(start_p); \
1525 T* p = start + (count); \
1526 while (start < p) { \
1527 --p; \
1528 (assert_fn)(p); \
1529 do_oop; \
1530 } \
1531 }
1533 #define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
1534 T, start_p, count, low, high, \
1535 do_oop, assert_fn) \
1536 { \
1537 T* const l = (T*)(low); \
1538 T* const h = (T*)(high); \
1539 assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
1540 mask_bits((intptr_t)h, sizeof(T)-1) == 0, \
1541 "bounded region must be properly aligned"); \
1542 T* p = (T*)(start_p); \
1543 T* end = p + (count); \
1544 if (p < l) p = l; \
1545 if (end > h) end = h; \
1546 while (p < end) { \
1547 (assert_fn)(p); \
1548 do_oop; \
1549 ++p; \
1550 } \
1551 }
1554 // The following macros call specialized macros, passing either oop or
1555 // narrowOop as the specialization type. These test the UseCompressedOops
1556 // flag.
1557 #define InstanceKlass_OOP_ITERATE(start_p, count, \
1558 do_oop, assert_fn) \
1559 { \
1560 if (UseCompressedOops) { \
1561 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
1562 start_p, count, \
1563 do_oop, assert_fn) \
1564 } else { \
1565 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
1566 start_p, count, \
1567 do_oop, assert_fn) \
1568 } \
1569 }
1571 #define InstanceKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
1572 do_oop, assert_fn) \
1573 { \
1574 if (UseCompressedOops) { \
1575 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
1576 start_p, count, \
1577 low, high, \
1578 do_oop, assert_fn) \
1579 } else { \
1580 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
1581 start_p, count, \
1582 low, high, \
1583 do_oop, assert_fn) \
1584 } \
1585 }
1587 #define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \
1588 { \
1589 /* Compute oopmap block range. The common case \
1590 is nonstatic_oop_map_size == 1. */ \
1591 OopMapBlock* map = start_of_nonstatic_oop_maps(); \
1592 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
1593 if (UseCompressedOops) { \
1594 while (map < end_map) { \
1595 InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
1596 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1597 do_oop, assert_fn) \
1598 ++map; \
1599 } \
1600 } else { \
1601 while (map < end_map) { \
1602 InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \
1603 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1604 do_oop, assert_fn) \
1605 ++map; \
1606 } \
1607 } \
1608 }
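// Illustrative sketch (hypothetical layout, not from the original source): each
// OopMapBlock describes one contiguous run of oop fields as an (offset, count)
// pair. A class whose instances keep narrow oop fields at offsets 12 and 16 plus
// another at offset 32 would typically carry two blocks, {offset=12, count=2} and
// {offset=32, count=1}; the macro above then applies do_oop to the field addresses
// obj+12, obj+16 and obj+32, using narrowOop* or oop* depending on UseCompressedOops.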
1610 #define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \
1611 { \
1612 OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \
1613 OopMapBlock* map = start_map + nonstatic_oop_map_count(); \
1614 if (UseCompressedOops) { \
1615 while (start_map < map) { \
1616 --map; \
1617 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \
1618 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1619 do_oop, assert_fn) \
1620 } \
1621 } else { \
1622 while (start_map < map) { \
1623 --map; \
1624 InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \
1625 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1626 do_oop, assert_fn) \
1627 } \
1628 } \
1629 }
1631 #define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \
1632 assert_fn) \
1633 { \
1634 /* Compute oopmap block range. The common case is \
1635 nonstatic_oop_map_size == 1, so we accept the \
1636 usually non-existent extra overhead of examining \
1637 all the maps. */ \
1638 OopMapBlock* map = start_of_nonstatic_oop_maps(); \
1639 OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \
1640 if (UseCompressedOops) { \
1641 while (map < end_map) { \
1642 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \
1643 obj->obj_field_addr<narrowOop>(map->offset()), map->count(), \
1644 low, high, \
1645 do_oop, assert_fn) \
1646 ++map; \
1647 } \
1648 } else { \
1649 while (map < end_map) { \
1650 InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \
1651 obj->obj_field_addr<oop>(map->offset()), map->count(), \
1652 low, high, \
1653 do_oop, assert_fn) \
1654 ++map; \
1655 } \
1656 } \
1657 }
1659 void instanceKlass::follow_static_fields() {
1660 InstanceKlass_OOP_ITERATE( \
1661 start_of_static_fields(), static_oop_field_size(), \
1662 MarkSweep::mark_and_push(p), \
1663 assert_is_in_closed_subset)
1664 }
1666 #ifndef SERIALGC
1667 void instanceKlass::follow_static_fields(ParCompactionManager* cm) {
1668 InstanceKlass_OOP_ITERATE( \
1669 start_of_static_fields(), static_oop_field_size(), \
1670 PSParallelCompact::mark_and_push(cm, p), \
1671 assert_is_in)
1672 }
1673 #endif // SERIALGC
1675 void instanceKlass::adjust_static_fields() {
1676 InstanceKlass_OOP_ITERATE( \
1677 start_of_static_fields(), static_oop_field_size(), \
1678 MarkSweep::adjust_pointer(p), \
1679 assert_nothing)
1680 }
1682 #ifndef SERIALGC
1683 void instanceKlass::update_static_fields() {
1684 InstanceKlass_OOP_ITERATE( \
1685 start_of_static_fields(), static_oop_field_size(), \
1686 PSParallelCompact::adjust_pointer(p), \
1687 assert_nothing)
1688 }
1690 void instanceKlass::update_static_fields(HeapWord* beg_addr, HeapWord* end_addr) {
1691 InstanceKlass_BOUNDED_OOP_ITERATE( \
1692 start_of_static_fields(), static_oop_field_size(), \
1693 beg_addr, end_addr, \
1694 PSParallelCompact::adjust_pointer(p), \
1695 assert_nothing )
1696 }
1697 #endif // SERIALGC
1699 void instanceKlass::oop_follow_contents(oop obj) {
1700 assert(obj != NULL, "can't follow the content of NULL object");
1701 obj->follow_header();
1702 InstanceKlass_OOP_MAP_ITERATE( \
1703 obj, \
1704 MarkSweep::mark_and_push(p), \
1705 assert_is_in_closed_subset)
1706 }
1708 #ifndef SERIALGC
1709 void instanceKlass::oop_follow_contents(ParCompactionManager* cm,
1710 oop obj) {
1711 assert(obj != NULL, "can't follow the content of NULL object");
1712 obj->follow_header(cm);
1713 InstanceKlass_OOP_MAP_ITERATE( \
1714 obj, \
1715 PSParallelCompact::mark_and_push(cm, p), \
1716 assert_is_in)
1717 }
1718 #endif // SERIALGC
1720 // The closure's do_header() method dictates whether the given closure should be
1721 // applied to the klass ptr in the object header.
1723 #define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
1724 \
1725 int instanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
1726 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1727 /* header */ \
1728 if (closure->do_header()) { \
1729 obj->oop_iterate_header(closure); \
1730 } \
1731 InstanceKlass_OOP_MAP_ITERATE( \
1732 obj, \
1733 SpecializationStats:: \
1734 record_do_oop_call##nv_suffix(SpecializationStats::ik); \
1735 (closure)->do_oop##nv_suffix(p), \
1736 assert_is_in_closed_subset) \
1737 return size_helper(); \
1738 }
1740 #ifndef SERIALGC
1741 #define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
1742 \
1743 int instanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \
1744 OopClosureType* closure) { \
1745 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik); \
1746 /* header */ \
1747 if (closure->do_header()) { \
1748 obj->oop_iterate_header(closure); \
1749 } \
1750 /* instance variables */ \
1751 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1752 obj, \
1753 SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::ik);\
1754 (closure)->do_oop##nv_suffix(p), \
1755 assert_is_in_closed_subset) \
1756 return size_helper(); \
1757 }
1758 #endif // !SERIALGC
1760 #define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
1761 \
1762 int instanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \
1763 OopClosureType* closure, \
1764 MemRegion mr) { \
1765 SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::ik);\
1766 if (closure->do_header()) { \
1767 obj->oop_iterate_header(closure, mr); \
1768 } \
1769 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
1770 obj, mr.start(), mr.end(), \
1771 (closure)->do_oop##nv_suffix(p), \
1772 assert_is_in_closed_subset) \
1773 return size_helper(); \
1774 }
1776 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1777 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN)
1778 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1779 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m)
1780 #ifndef SERIALGC
1781 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1782 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
1783 #endif // !SERIALGC
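// A hedged note on the instantiations above (assumption, not spelled out here):
// the ALL_OOP_OOP_ITERATE_CLOSURES_1/2 macros expand the *_DEFN macros once per
// known closure type, so each closure gets its own specialized oop_oop_iterate
// body and the hot GC loops can invoke do_oop##nv_suffix without relying on
// virtual dispatch.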
1785 void instanceKlass::iterate_static_fields(OopClosure* closure) {
1786 InstanceKlass_OOP_ITERATE( \
1787 start_of_static_fields(), static_oop_field_size(), \
1788 closure->do_oop(p), \
1789 assert_is_in_reserved)
1790 }
1792 void instanceKlass::iterate_static_fields(OopClosure* closure,
1793 MemRegion mr) {
1794 InstanceKlass_BOUNDED_OOP_ITERATE( \
1795 start_of_static_fields(), static_oop_field_size(), \
1796 mr.start(), mr.end(), \
1797 (closure)->do_oop_v(p), \
1798 assert_is_in_closed_subset)
1799 }
1801 int instanceKlass::oop_adjust_pointers(oop obj) {
1802 int size = size_helper();
1803 InstanceKlass_OOP_MAP_ITERATE( \
1804 obj, \
1805 MarkSweep::adjust_pointer(p), \
1806 assert_is_in)
1807 obj->adjust_header();
1808 return size;
1809 }
1811 #ifndef SERIALGC
1812 void instanceKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
1813 assert(!pm->depth_first(), "invariant");
1814 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1815 obj, \
1816 if (PSScavenge::should_scavenge(p)) { \
1817 pm->claim_or_forward_breadth(p); \
1818 }, \
1819 assert_nothing )
1820 }
1822 void instanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
1823 assert(pm->depth_first(), "invariant");
1824 InstanceKlass_OOP_MAP_REVERSE_ITERATE( \
1825 obj, \
1826 if (PSScavenge::should_scavenge(p)) { \
1827 pm->claim_or_forward_depth(p); \
1828 }, \
1829 assert_nothing )
1830 }
1832 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
1833 InstanceKlass_OOP_MAP_ITERATE( \
1834 obj, \
1835 PSParallelCompact::adjust_pointer(p), \
1836 assert_nothing)
1837 return size_helper();
1838 }
1840 int instanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj,
1841 HeapWord* beg_addr, HeapWord* end_addr) {
1842 InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \
1843 obj, beg_addr, end_addr, \
1844 PSParallelCompact::adjust_pointer(p), \
1845 assert_nothing)
1846 return size_helper();
1847 }
1849 void instanceKlass::copy_static_fields(PSPromotionManager* pm) {
1850 assert(!pm->depth_first(), "invariant");
1851 InstanceKlass_OOP_ITERATE( \
1852 start_of_static_fields(), static_oop_field_size(), \
1853 if (PSScavenge::should_scavenge(p)) { \
1854 pm->claim_or_forward_breadth(p); \
1855 }, \
1856 assert_nothing )
1857 }
1859 void instanceKlass::push_static_fields(PSPromotionManager* pm) {
1860 assert(pm->depth_first(), "invariant");
1861 InstanceKlass_OOP_ITERATE( \
1862 start_of_static_fields(), static_oop_field_size(), \
1863 if (PSScavenge::should_scavenge(p)) { \
1864 pm->claim_or_forward_depth(p); \
1865 }, \
1866 assert_nothing )
1867 }
1869 void instanceKlass::copy_static_fields(ParCompactionManager* cm) {
1870 InstanceKlass_OOP_ITERATE( \
1871 start_of_static_fields(), static_oop_field_size(), \
1872 PSParallelCompact::adjust_pointer(p), \
1873 assert_is_in)
1874 }
1875 #endif // SERIALGC
1877 // This klass is alive but the implementor link is not followed/updated.
1878 // Subklass and sibling links are handled by Klass::follow_weak_klass_links
1880 void instanceKlass::follow_weak_klass_links(
1881 BoolObjectClosure* is_alive, OopClosure* keep_alive) {
1882 assert(is_alive->do_object_b(as_klassOop()), "this oop should be live");
1883 if (ClassUnloading) {
1884 for (int i = 0; i < implementors_limit; i++) {
1885 klassOop impl = _implementors[i];
1886 if (impl == NULL) break; // no more in the list
1887 if (!is_alive->do_object_b(impl)) {
1888 // remove this implementor from the list by overwriting it with the tail
1889 int lasti = --_nof_implementors;
1890 assert(lasti >= i && lasti < implementors_limit, "just checking");
1891 _implementors[i] = _implementors[lasti];
1892 _implementors[lasti] = NULL;
1893 --i; // rerun the loop at this index
1894 }
1895 }
1896 } else {
1897 for (int i = 0; i < implementors_limit; i++) {
1898 keep_alive->do_oop(&adr_implementors()[i]);
1899 }
1900 }
1901 Klass::follow_weak_klass_links(is_alive, keep_alive);
1902 }
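// Illustrative walk-through (hypothetical values, not from the original source) of
// the dead-implementor removal above: with _implementors == { A, B, C, NULL } and
// B found dead, _nof_implementors drops from 3 to 2, B's slot is overwritten with
// the tail entry C, C's old slot is cleared, and the loop index is rerun, leaving
// { A, C, NULL, NULL }.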
1904 void instanceKlass::remove_unshareable_info() {
1905 Klass::remove_unshareable_info();
1906 init_implementor();
1907 }
1909 static void clear_all_breakpoints(methodOop m) {
1910 m->clear_all_breakpoints();
1911 }
1913 void instanceKlass::release_C_heap_structures() {
1914 // Deallocate oop map cache
1915 if (_oop_map_cache != NULL) {
1916 delete _oop_map_cache;
1917 _oop_map_cache = NULL;
1918 }
1920 // Deallocate JNI identifiers for jfieldIDs
1921 JNIid::deallocate(jni_ids());
1922 set_jni_ids(NULL);
1924 jmethodID* jmeths = methods_jmethod_ids_acquire();
1925 if (jmeths != (jmethodID*)NULL) {
1926 release_set_methods_jmethod_ids(NULL);
1927 FreeHeap(jmeths);
1928 }
1930 int* indices = methods_cached_itable_indices_acquire();
1931 if (indices != (int*)NULL) {
1932 release_set_methods_cached_itable_indices(NULL);
1933 FreeHeap(indices);
1934 }
1936 // release dependencies
1937 nmethodBucket* b = _dependencies;
1938 _dependencies = NULL;
1939 while (b != NULL) {
1940 nmethodBucket* next = b->next();
1941 delete b;
1942 b = next;
1943 }
1945 // Deallocate breakpoint records
1946 if (breakpoints() != 0x0) {
1947 methods_do(clear_all_breakpoints);
1948 assert(breakpoints() == 0x0, "should have cleared breakpoints");
1949 }
1951 // deallocate information about previous versions
1952 if (_previous_versions != NULL) {
1953 for (int i = _previous_versions->length() - 1; i >= 0; i--) {
1954 PreviousVersionNode * pv_node = _previous_versions->at(i);
1955 delete pv_node;
1956 }
1957 delete _previous_versions;
1958 _previous_versions = NULL;
1959 }
1961 // deallocate the cached class file
1962 if (_cached_class_file_bytes != NULL) {
1963 os::free(_cached_class_file_bytes);
1964 _cached_class_file_bytes = NULL;
1965 _cached_class_file_len = 0;
1966 }
1967 }
1969 const char* instanceKlass::signature_name() const {
1970 const char* src = (const char*) (name()->as_C_string());
1971 const int src_length = (int)strlen(src);
1972 char* dest = NEW_RESOURCE_ARRAY(char, src_length + 3);
1973 int src_index = 0;
1974 int dest_index = 0;
1975 dest[dest_index++] = 'L';
1976 while (src_index < src_length) {
1977 dest[dest_index++] = src[src_index++];
1978 }
1979 dest[dest_index++] = ';';
1980 dest[dest_index] = '\0';
1981 return dest;
1982 }
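// Illustrative example (not from the original source): for a klass whose name is
// "java/util/HashMap", signature_name() returns the resource-allocated string
// "Ljava/util/HashMap;".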
1984 // different versions of is_same_class_package
1985 bool instanceKlass::is_same_class_package(klassOop class2) {
1986 klassOop class1 = as_klassOop();
1987 oop classloader1 = instanceKlass::cast(class1)->class_loader();
1988 symbolOop classname1 = Klass::cast(class1)->name();
1990 if (Klass::cast(class2)->oop_is_objArray()) {
1991 class2 = objArrayKlass::cast(class2)->bottom_klass();
1992 }
1993 oop classloader2;
1994 if (Klass::cast(class2)->oop_is_instance()) {
1995 classloader2 = instanceKlass::cast(class2)->class_loader();
1996 } else {
1997 assert(Klass::cast(class2)->oop_is_typeArray(), "should be type array");
1998 classloader2 = NULL;
1999 }
2000 symbolOop classname2 = Klass::cast(class2)->name();
2002 return instanceKlass::is_same_class_package(classloader1, classname1,
2003 classloader2, classname2);
2004 }
2006 bool instanceKlass::is_same_class_package(oop classloader2, symbolOop classname2) {
2007 klassOop class1 = as_klassOop();
2008 oop classloader1 = instanceKlass::cast(class1)->class_loader();
2009 symbolOop classname1 = Klass::cast(class1)->name();
2011 return instanceKlass::is_same_class_package(classloader1, classname1,
2012 classloader2, classname2);
2013 }
2015 // Return true if the two classes are in the same package; classloader
2016 // and classname information is enough to determine a class's package
2017 bool instanceKlass::is_same_class_package(oop class_loader1, symbolOop class_name1,
2018 oop class_loader2, symbolOop class_name2) {
2019 if (class_loader1 != class_loader2) {
2020 return false;
2021 } else if (class_name1 == class_name2) {
2022 return true; // skip painful bytewise comparison
2023 } else {
2024 ResourceMark rm;
2026 // The symbolOop's are in UTF8 encoding. Since we only need to check explicitly
2027 // for ASCII characters ('/', 'L', '['), we can keep them in UTF8 encoding.
2028 // Otherwise, we just compare jbyte values between the strings.
2029 jbyte *name1 = class_name1->base();
2030 jbyte *name2 = class_name2->base();
2032 jbyte *last_slash1 = UTF8::strrchr(name1, class_name1->utf8_length(), '/');
2033 jbyte *last_slash2 = UTF8::strrchr(name2, class_name2->utf8_length(), '/');
2035 if ((last_slash1 == NULL) || (last_slash2 == NULL)) {
2036 // One of the two doesn't have a package. Only return true
2037 // if the other one also doesn't have a package.
2038 return last_slash1 == last_slash2;
2039 } else {
2040 // Skip over '['s
2041 if (*name1 == '[') {
2042 do {
2043 name1++;
2044 } while (*name1 == '[');
2045 if (*name1 != 'L') {
2046 // Something is terribly wrong. Shouldn't be here.
2047 return false;
2048 }
2049 }
2050 if (*name2 == '[') {
2051 do {
2052 name2++;
2053 } while (*name2 == '[');
2054 if (*name2 != 'L') {
2055 // Something is terribly wrong. Shouldn't be here.
2056 return false;
2057 }
2058 }
2060 // Check that package part is identical
2061 int length1 = last_slash1 - name1;
2062 int length2 = last_slash2 - name2;
2064 return UTF8::equal(name1, length1, name2, length2);
2065 }
2066 }
2067 }
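// Illustrative examples (hypothetical names, assuming both classes come from the
// same class loader):
//   "java/util/Map" vs "java/util/HashMap"        -> same package ("java/util")
//   "java/util/Map" vs "java/util/concurrent/Foo" -> different packages
//   "Foo"           vs "Bar" (no '/' in either)   -> both in the unnamed package,
//                                                    so they compare as the same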
2069 // Returns true iff super_method can be overridden by a method in targetclassname
2070 // See JLS 3rd edition 8.4.6.1
2071 // Assumes name-signature match
2072 // "this" is the instanceKlass of super_method, which must exist
2073 // Note that the instanceKlass of the method in targetclassname may not have been created yet
2074 bool instanceKlass::is_override(methodHandle super_method, Handle targetclassloader, symbolHandle targetclassname, TRAPS) {
2075 // Private methods cannot be overridden
2076 if (super_method->is_private()) {
2077 return false;
2078 }
2079 // If super method is accessible, then override
2080 if ((super_method->is_protected()) ||
2081 (super_method->is_public())) {
2082 return true;
2083 }
2084 // Package-private methods are not inherited outside of package
2085 assert(super_method->is_package_private(), "must be package private");
2086 return(is_same_class_package(targetclassloader(), targetclassname()));
2087 }
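// Illustrative consequence of the rules above (hypothetical classes): a
// package-private method declared in p1/A can be overridden by p1/B when both are
// defined by the same class loader, but not by p2/C, while protected and public
// methods are overridable regardless of package.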
2089 /* defined for now in jvm.cpp, for historical reasons *--
2090 klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle self,
2091 symbolOop& simple_name_result, TRAPS) {
2092 ...
2093 }
2094 */
2096 // tell if two classes have the same enclosing class (at package level)
2097 bool instanceKlass::is_same_package_member_impl(instanceKlassHandle class1,
2098 klassOop class2_oop, TRAPS) {
2099 if (class2_oop == class1->as_klassOop()) return true;
2100 if (!Klass::cast(class2_oop)->oop_is_instance()) return false;
2101 instanceKlassHandle class2(THREAD, class2_oop);
2103 // must be in same package before we try anything else
2104 if (!class1->is_same_class_package(class2->class_loader(), class2->name()))
2105 return false;
2107 // As long as there is an outer1.getEnclosingClass,
2108 // shift the search outward.
2109 instanceKlassHandle outer1 = class1;
2110 for (;;) {
2111 // As we walk along, look for equalities between outer1 and class2.
2112 // Eventually, the walks will terminate as outer1 stops
2113 // at the top-level class around the original class.
2114 bool ignore_inner_is_member;
2115 klassOop next = outer1->compute_enclosing_class(&ignore_inner_is_member,
2116 CHECK_false);
2117 if (next == NULL) break;
2118 if (next == class2()) return true;
2119 outer1 = instanceKlassHandle(THREAD, next);
2120 }
2122 // Now do the same for class2.
2123 instanceKlassHandle outer2 = class2;
2124 for (;;) {
2125 bool ignore_inner_is_member;
2126 klassOop next = outer2->compute_enclosing_class(&ignore_inner_is_member,
2127 CHECK_false);
2128 if (next == NULL) break;
2129 // Might as well check the new outer against all available values.
2130 if (next == class1()) return true;
2131 if (next == outer1()) return true;
2132 outer2 = instanceKlassHandle(THREAD, next);
2133 }
2135 // If by this point we have not found an equality between the
2136 // two classes, we know they are in separate package members.
2137 return false;
2138 }
2141 jint instanceKlass::compute_modifier_flags(TRAPS) const {
2142 klassOop k = as_klassOop();
2143 jint access = access_flags().as_int();
2145 // But check if it happens to be a member class.
2146 typeArrayOop inner_class_list = inner_classes();
2147 int length = (inner_class_list == NULL) ? 0 : inner_class_list->length();
2148 assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
2149 if (length > 0) {
2150 typeArrayHandle inner_class_list_h(THREAD, inner_class_list);
2151 instanceKlassHandle ik(THREAD, k);
2152 for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
2153 int ioff = inner_class_list_h->ushort_at(
2154 i + instanceKlass::inner_class_inner_class_info_offset);
2156 // Inner class attribute can be zero, skip it.
2157 // Strange but true: JVM spec. allows null inner class refs.
2158 if (ioff == 0) continue;
2160 // only look at classes that are already loaded
2161 // since we are looking for the flags of this class itself.
2162 symbolOop inner_name = ik->constants()->klass_name_at(ioff);
2163 if ((ik->name() == inner_name)) {
2164 // This is really a member class.
2165 access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
2166 break;
2167 }
2168 }
2169 }
2170 // Remember to strip ACC_SUPER bit
2171 return (access & (~JVM_ACC_SUPER)) & JVM_ACC_WRITTEN_FLAGS;
2172 }
2174 jint instanceKlass::jvmti_class_status() const {
2175 jint result = 0;
2177 if (is_linked()) {
2178 result |= JVMTI_CLASS_STATUS_VERIFIED | JVMTI_CLASS_STATUS_PREPARED;
2179 }
2181 if (is_initialized()) {
2182 assert(is_linked(), "Class status is not consistent");
2183 result |= JVMTI_CLASS_STATUS_INITIALIZED;
2184 }
2185 if (is_in_error_state()) {
2186 result |= JVMTI_CLASS_STATUS_ERROR;
2187 }
2188 return result;
2189 }
2191 methodOop instanceKlass::method_at_itable(klassOop holder, int index, TRAPS) {
2192 itableOffsetEntry* ioe = (itableOffsetEntry*)start_of_itable();
2193 int method_table_offset_in_words = ioe->offset()/wordSize;
2194 int nof_interfaces = (method_table_offset_in_words - itable_offset_in_words())
2195 / itableOffsetEntry::size();
2197 for (int cnt = 0 ; ; cnt ++, ioe ++) {
2198 // If the interface isn't implemented by the receiver class,
2199 // the VM should throw IncompatibleClassChangeError.
2200 if (cnt >= nof_interfaces) {
2201 THROW_OOP_0(vmSymbols::java_lang_IncompatibleClassChangeError());
2202 }
2204 klassOop ik = ioe->interface_klass();
2205 if (ik == holder) break;
2206 }
2208 itableMethodEntry* ime = ioe->first_method_entry(as_klassOop());
2209 methodOop m = ime[index].method();
2210 if (m == NULL) {
2211 THROW_OOP_0(vmSymbols::java_lang_AbstractMethodError());
2212 }
2213 return m;
2214 }
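// Illustrative sketch (hypothetical interfaces, not from the original source): for
// a receiver class implementing I1 and I2, the itable starts with one
// itableOffsetEntry per interface followed by per-interface method blocks. A call
// with holder == I2 and index == 3 scans the offset entries until the I2 entry is
// found and then returns the fourth methodOop of I2's block, throwing
// IncompatibleClassChangeError if I2 is never found or AbstractMethodError if the
// selected slot is NULL.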
2216 // On-stack replacement stuff
2217 void instanceKlass::add_osr_nmethod(nmethod* n) {
2218 // only one compilation can be active
2219 NEEDS_CLEANUP
2220 // This is a short non-blocking critical region, so the no safepoint check is ok.
2221 OsrList_lock->lock_without_safepoint_check();
2222 assert(n->is_osr_method(), "wrong kind of nmethod");
2223 n->set_osr_link(osr_nmethods_head());
2224 set_osr_nmethods_head(n);
2225 // Remember to unlock again
2226 OsrList_lock->unlock();
2227 }
2230 void instanceKlass::remove_osr_nmethod(nmethod* n) {
2231 // This is a short non-blocking critical region, so the no safepoint check is ok.
2232 OsrList_lock->lock_without_safepoint_check();
2233 assert(n->is_osr_method(), "wrong kind of nmethod");
2234 nmethod* last = NULL;
2235 nmethod* cur = osr_nmethods_head();
2236 // Search for match
2237 while(cur != NULL && cur != n) {
2238 last = cur;
2239 cur = cur->osr_link();
2240 }
2241 if (cur == n) {
2242 if (last == NULL) {
2243 // Remove first element
2244 set_osr_nmethods_head(osr_nmethods_head()->osr_link());
2245 } else {
2246 last->set_osr_link(cur->osr_link());
2247 }
2248 }
2249 n->set_osr_link(NULL);
2250 // Remember to unlock again
2251 OsrList_lock->unlock();
2252 }
2254 nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci) const {
2255 // This is a short non-blocking critical region, so the no safepoint check is ok.
2256 OsrList_lock->lock_without_safepoint_check();
2257 nmethod* osr = osr_nmethods_head();
2258 while (osr != NULL) {
2259 assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
2260 if (osr->method() == m &&
2261 (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
2262 // Found a match - return it.
2263 OsrList_lock->unlock();
2264 return osr;
2265 }
2266 osr = osr->osr_link();
2267 }
2268 OsrList_lock->unlock();
2269 return NULL;
2270 }
2272 // -----------------------------------------------------------------------------------------------------
2273 #ifndef PRODUCT
2275 // Printing
2277 #define BULLET " - "
2279 void FieldPrinter::do_field(fieldDescriptor* fd) {
2280 _st->print(BULLET);
2281 if (fd->is_static() || (_obj == NULL)) {
2282 fd->print_on(_st);
2283 _st->cr();
2284 } else {
2285 fd->print_on_for(_st, _obj);
2286 _st->cr();
2287 }
2288 }
2291 void instanceKlass::oop_print_on(oop obj, outputStream* st) {
2292 Klass::oop_print_on(obj, st);
2294 if (as_klassOop() == SystemDictionary::String_klass()) {
2295 typeArrayOop value = java_lang_String::value(obj);
2296 juint offset = java_lang_String::offset(obj);
2297 juint length = java_lang_String::length(obj);
2298 if (value != NULL &&
2299 value->is_typeArray() &&
2300 offset <= (juint) value->length() &&
2301 offset + length <= (juint) value->length()) {
2302 st->print(BULLET"string: ");
2303 Handle h_obj(obj);
2304 java_lang_String::print(h_obj, st);
2305 st->cr();
2306 if (!WizardMode) return; // that is enough
2307 }
2308 }
2310 st->print_cr(BULLET"---- fields (total size %d words):", oop_size(obj));
2311 FieldPrinter print_nonstatic_field(st, obj);
2312 do_nonstatic_fields(&print_nonstatic_field);
2314 if (as_klassOop() == SystemDictionary::Class_klass()) {
2315 st->print(BULLET"signature: ");
2316 java_lang_Class::print_signature(obj, st);
2317 st->cr();
2318 klassOop mirrored_klass = java_lang_Class::as_klassOop(obj);
2319 st->print(BULLET"fake entry for mirror: ");
2320 mirrored_klass->print_value_on(st);
2321 st->cr();
2322 st->print(BULLET"fake entry resolved_constructor: ");
2323 methodOop ctor = java_lang_Class::resolved_constructor(obj);
2324 ctor->print_value_on(st);
2325 klassOop array_klass = java_lang_Class::array_klass(obj);
2326 st->cr();
2327 st->print(BULLET"fake entry for array: ");
2328 array_klass->print_value_on(st);
2329 st->cr();
2330 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2331 st->print(BULLET"signature: ");
2332 java_dyn_MethodType::print_signature(obj, st);
2333 st->cr();
2334 }
2335 }
2337 #endif //PRODUCT
2339 void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
2340 st->print("a ");
2341 name()->print_value_on(st);
2342 obj->print_address_on(st);
2343 if (as_klassOop() == SystemDictionary::String_klass()
2344 && java_lang_String::value(obj) != NULL) {
2345 ResourceMark rm;
2346 int len = java_lang_String::length(obj);
2347 int plen = (len < 24 ? len : 12);
2348 char* str = java_lang_String::as_utf8_string(obj, 0, plen);
2349 st->print(" = \"%s\"", str);
2350 if (len > plen)
2351 st->print("...[%d]", len);
2352 } else if (as_klassOop() == SystemDictionary::Class_klass()) {
2353 klassOop k = java_lang_Class::as_klassOop(obj);
2354 st->print(" = ");
2355 if (k != NULL) {
2356 k->print_value_on(st);
2357 } else {
2358 const char* tname = type2name(java_lang_Class::primitive_type(obj));
2359 st->print("%s", tname ? tname : "type?");
2360 }
2361 } else if (as_klassOop() == SystemDictionary::MethodType_klass()) {
2362 st->print(" = ");
2363 java_dyn_MethodType::print_signature(obj, st);
2364 } else if (java_lang_boxing_object::is_instance(obj)) {
2365 st->print(" = ");
2366 java_lang_boxing_object::print(obj, st);
2367 }
2368 }
2370 const char* instanceKlass::internal_name() const {
2371 return external_name();
2372 }
2374 // Verification
2376 class VerifyFieldClosure: public OopClosure {
2377 protected:
2378 template <class T> void do_oop_work(T* p) {
2379 guarantee(Universe::heap()->is_in_closed_subset(p), "should be in heap");
2380 oop obj = oopDesc::load_decode_heap_oop(p);
2381 if (!obj->is_oop_or_null()) {
2382 tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p, (address)obj);
2383 Universe::print();
2384 guarantee(false, "boom");
2385 }
2386 }
2387 public:
2388 virtual void do_oop(oop* p) { VerifyFieldClosure::do_oop_work(p); }
2389 virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
2390 };
2392 void instanceKlass::oop_verify_on(oop obj, outputStream* st) {
2393 Klass::oop_verify_on(obj, st);
2394 VerifyFieldClosure blk;
2395 oop_oop_iterate(obj, &blk);
2396 }
2398 #ifndef PRODUCT
2400 void instanceKlass::verify_class_klass_nonstatic_oop_maps(klassOop k) {
2401 // This verification code is disabled. JDK_Version::is_gte_jdk14x_version()
2402 // cannot be called since this function is called before the VM is
2403 // able to determine what JDK version it is running with.
2404 // The check below has always been false since 1.4.
2405 return;
2407 // This verification code was temporarily disabled for the 1.4
2408 // reflection implementation since java.lang.Class now has
2409 // Java-level instance fields. Should rewrite this to handle this
2410 // case.
2411 if (!(JDK_Version::is_gte_jdk14x_version() && UseNewReflection)) {
2412 // Verify that java.lang.Class instances have a fake oop field added.
2413 instanceKlass* ik = instanceKlass::cast(k);
2415 // Check that we have the right class
2416 static bool first_time = true;
2417 guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
2418 first_time = false;
2419 const int extra = java_lang_Class::number_of_fake_oop_fields;
2420 guarantee(ik->nonstatic_field_size() == extra, "just checking");
2421 guarantee(ik->nonstatic_oop_map_count() == 1, "just checking");
2422 guarantee(ik->size_helper() == align_object_size(instanceOopDesc::header_size() + extra), "just checking");
2424 // Check that the map is (2,extra)
2425 int offset = java_lang_Class::klass_offset;
2427 OopMapBlock* map = ik->start_of_nonstatic_oop_maps();
2428 guarantee(map->offset() == offset && map->count() == (unsigned int) extra,
2429 "sanity");
2430 }
2431 }
2433 #endif // ndef PRODUCT
2435 // JNIid class for jfieldIDs only
2436 // Note to reviewers:
2437 // These JNI functions are just moved over to column 1 and not changed
2438 // in the compressed oops workspace.
2439 JNIid::JNIid(klassOop holder, int offset, JNIid* next) {
2440 _holder = holder;
2441 _offset = offset;
2442 _next = next;
2443 debug_only(_is_static_field_id = false;)
2444 }
2447 JNIid* JNIid::find(int offset) {
2448 JNIid* current = this;
2449 while (current != NULL) {
2450 if (current->offset() == offset) return current;
2451 current = current->next();
2452 }
2453 return NULL;
2454 }
2456 void JNIid::oops_do(OopClosure* f) {
2457 for (JNIid* cur = this; cur != NULL; cur = cur->next()) {
2458 f->do_oop(cur->holder_addr());
2459 }
2460 }
2462 void JNIid::deallocate(JNIid* current) {
2463 while (current != NULL) {
2464 JNIid* next = current->next();
2465 delete current;
2466 current = next;
2467 }
2468 }
2471 void JNIid::verify(klassOop holder) {
2472 int first_field_offset = instanceKlass::cast(holder)->offset_of_static_fields();
2473 int end_field_offset;
2474 end_field_offset = first_field_offset + (instanceKlass::cast(holder)->static_field_size() * wordSize);
2476 JNIid* current = this;
2477 while (current != NULL) {
2478 guarantee(current->holder() == holder, "Invalid klass in JNIid");
2479 #ifdef ASSERT
2480 int o = current->offset();
2481 if (current->is_static_field_id()) {
2482 guarantee(o >= first_field_offset && o < end_field_offset, "Invalid static field offset in JNIid");
2483 }
2484 #endif
2485 current = current->next();
2486 }
2487 }
2490 #ifdef ASSERT
2491 void instanceKlass::set_init_state(ClassState state) {
2492 bool good_state = as_klassOop()->is_shared() ? (_init_state <= state)
2493 : (_init_state < state);
2494 assert(good_state || state == allocated, "illegal state transition");
2495 _init_state = state;
2496 }
2497 #endif
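// Illustrative reading of the assert above (assuming the usual ClassState ordering
// loaded < linked < being_initialized < fully_initialized): a non-shared klass may
// only move forward, e.g. linked -> being_initialized, while a shared klass is also
// allowed to "re-enter" the state it is already in; any state may additionally be
// reset to allocated.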
2500 // RedefineClasses() support for previous versions:
2502 // Add an information node that contains weak references to the
2503 // interesting parts of the previous version of the_class.
2504 // This is also where we clean out any unused weak references.
2505 // Note that while we delete nodes from the _previous_versions
2506 // array, we never delete the array itself until the klass is
2507 // unloaded. The has_been_redefined() query depends on that fact.
2508 //
2509 void instanceKlass::add_previous_version(instanceKlassHandle ikh,
2510 BitMap* emcp_methods, int emcp_method_count) {
2511 assert(Thread::current()->is_VM_thread(),
2512 "only VMThread can add previous versions");
2514 if (_previous_versions == NULL) {
2515 // This is the first previous version so make some space.
2516 // Start with 2 elements under the assumption that the class
2517 // won't be redefined much.
2518 _previous_versions = new (ResourceObj::C_HEAP)
2519 GrowableArray<PreviousVersionNode *>(2, true);
2520 }
2522 // RC_TRACE macro has an embedded ResourceMark
2523 RC_TRACE(0x00000100, ("adding previous version ref for %s @%d, EMCP_cnt=%d",
2524 ikh->external_name(), _previous_versions->length(), emcp_method_count));
2525 constantPoolHandle cp_h(ikh->constants());
2526 jobject cp_ref;
2527 if (cp_h->is_shared()) {
2528 // a shared ConstantPool requires a regular reference; a weak
2529 // reference would be collectible
2530 cp_ref = JNIHandles::make_global(cp_h);
2531 } else {
2532 cp_ref = JNIHandles::make_weak_global(cp_h);
2533 }
2534 PreviousVersionNode * pv_node = NULL;
2535 objArrayOop old_methods = ikh->methods();
2537 if (emcp_method_count == 0) {
2538 // non-shared ConstantPool gets a weak reference
2539 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), NULL);
2540 RC_TRACE(0x00000400,
2541 ("add: all methods are obsolete; flushing any EMCP weak refs"));
2542 } else {
2543 int local_count = 0;
2544 GrowableArray<jweak>* method_refs = new (ResourceObj::C_HEAP)
2545 GrowableArray<jweak>(emcp_method_count, true);
2546 for (int i = 0; i < old_methods->length(); i++) {
2547 if (emcp_methods->at(i)) {
2548 // this old method is EMCP so save a weak ref
2549 methodOop old_method = (methodOop) old_methods->obj_at(i);
2550 methodHandle old_method_h(old_method);
2551 jweak method_ref = JNIHandles::make_weak_global(old_method_h);
2552 method_refs->append(method_ref);
2553 if (++local_count >= emcp_method_count) {
2554 // no more EMCP methods so bail out now
2555 break;
2556 }
2557 }
2558 }
2559 // non-shared ConstantPool gets a weak reference
2560 pv_node = new PreviousVersionNode(cp_ref, !cp_h->is_shared(), method_refs);
2561 }
2563 _previous_versions->append(pv_node);
2565 // Using weak references allows the interesting parts of previous
2566 // classes to be GC'ed when they are no longer needed. Since the
2567 // caller is the VMThread and we are at a safepoint, this is a good
2568 // time to clear out unused weak references.
2570 RC_TRACE(0x00000400, ("add: previous version length=%d",
2571 _previous_versions->length()));
2573 // skip the last entry since we just added it
2574 for (int i = _previous_versions->length() - 2; i >= 0; i--) {
2575 // check the previous versions array for GC'ed weak refs
2576 pv_node = _previous_versions->at(i);
2577 cp_ref = pv_node->prev_constant_pool();
2578 assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2579 if (cp_ref == NULL) {
2580 delete pv_node;
2581 _previous_versions->remove_at(i);
2582 // Since we are traversing the array backwards, we don't have to
2583 // do anything special with the index.
2584 continue; // robustness
2585 }
2587 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2588 if (cp == NULL) {
2589 // this entry has been GC'ed so remove it
2590 delete pv_node;
2591 _previous_versions->remove_at(i);
2592 // Since we are traversing the array backwards, we don't have to
2593 // do anything special with the index.
2594 continue;
2595 } else {
2596 RC_TRACE(0x00000400, ("add: previous version @%d is alive", i));
2597 }
2599 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2600 if (method_refs != NULL) {
2601 RC_TRACE(0x00000400, ("add: previous methods length=%d",
2602 method_refs->length()));
2603 for (int j = method_refs->length() - 1; j >= 0; j--) {
2604 jweak method_ref = method_refs->at(j);
2605 assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2606 if (method_ref == NULL) {
2607 method_refs->remove_at(j);
2608 // Since we are traversing the array backwards, we don't have to
2609 // do anything special with the index.
2610 continue; // robustness
2611 }
2613 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2614 if (method == NULL || emcp_method_count == 0) {
2615 // This method entry has been GC'ed or the current
2616 // RedefineClasses() call has made all methods obsolete
2617 // so remove it.
2618 JNIHandles::destroy_weak_global(method_ref);
2619 method_refs->remove_at(j);
2620 } else {
2621 // RC_TRACE macro has an embedded ResourceMark
2622 RC_TRACE(0x00000400,
2623 ("add: %s(%s): previous method @%d in version @%d is alive",
2624 method->name()->as_C_string(), method->signature()->as_C_string(),
2625 j, i));
2626 }
2627 }
2628 }
2629 }
2631 int obsolete_method_count = old_methods->length() - emcp_method_count;
2633 if (emcp_method_count != 0 && obsolete_method_count != 0 &&
2634 _previous_versions->length() > 1) {
2635 // We have a mix of obsolete and EMCP methods. If there is more
2636 // than the previous version that we just added, then we have to
2637 // clear out any matching EMCP method entries the hard way.
2638 int local_count = 0;
2639 for (int i = 0; i < old_methods->length(); i++) {
2640 if (!emcp_methods->at(i)) {
2641 // only obsolete methods are interesting
2642 methodOop old_method = (methodOop) old_methods->obj_at(i);
2643 symbolOop m_name = old_method->name();
2644 symbolOop m_signature = old_method->signature();
2646 // skip the last entry since we just added it
2647 for (int j = _previous_versions->length() - 2; j >= 0; j--) {
2648 // check the previous versions array for GC'ed weak refs
2649 pv_node = _previous_versions->at(j);
2650 cp_ref = pv_node->prev_constant_pool();
2651 assert(cp_ref != NULL, "cp ref was unexpectedly cleared");
2652 if (cp_ref == NULL) {
2653 delete pv_node;
2654 _previous_versions->remove_at(j);
2655 // Since we are traversing the array backwards, we don't have to
2656 // do anything special with the index.
2657 continue; // robustness
2658 }
2660 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2661 if (cp == NULL) {
2662 // this entry has been GC'ed so remove it
2663 delete pv_node;
2664 _previous_versions->remove_at(j);
2665 // Since we are traversing the array backwards, we don't have to
2666 // do anything special with the index.
2667 continue;
2668 }
2670 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2671 if (method_refs == NULL) {
2672 // We have run into a PreviousVersion generation where
2673 // all methods were made obsolete during that generation's
2674 // RedefineClasses() operation. At the time of that
2675 // operation, all EMCP methods were flushed so we don't
2676 // have to go back any further.
2677 //
2678 // A NULL method_refs is different than an empty method_refs.
2679 // We cannot infer any optimizations about older generations
2680 // from an empty method_refs for the current generation.
2681 break;
2682 }
2684 for (int k = method_refs->length() - 1; k >= 0; k--) {
2685 jweak method_ref = method_refs->at(k);
2686 assert(method_ref != NULL,
2687 "weak method ref was unexpectedly cleared");
2688 if (method_ref == NULL) {
2689 method_refs->remove_at(k);
2690 // Since we are traversing the array backwards, we don't
2691 // have to do anything special with the index.
2692 continue; // robustness
2693 }
2695 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2696 if (method == NULL) {
2697 // this method entry has been GC'ed so skip it
2698 JNIHandles::destroy_weak_global(method_ref);
2699 method_refs->remove_at(k);
2700 continue;
2701 }
2703 if (method->name() == m_name &&
2704 method->signature() == m_signature) {
2705 // The current RedefineClasses() call has made all EMCP
2706 // versions of this method obsolete so mark it as obsolete
2707 // and remove the weak ref.
2708 RC_TRACE(0x00000400,
2709 ("add: %s(%s): flush obsolete method @%d in version @%d",
2710 m_name->as_C_string(), m_signature->as_C_string(), k, j));
2712 method->set_is_obsolete();
2713 JNIHandles::destroy_weak_global(method_ref);
2714 method_refs->remove_at(k);
2715 break;
2716 }
2717 }
2719 // The previous loop may not find a matching EMCP method, but
2720 // that doesn't mean that we can optimize and not go any
2721 // further back in the PreviousVersion generations. The EMCP
2722 // method for this generation could have already been GC'ed,
2723 // but there still may be an older EMCP method that has not
2724 // been GC'ed.
2725 }
2727 if (++local_count >= obsolete_method_count) {
2728 // no more obsolete methods so bail out now
2729 break;
2730 }
2731 }
2732 }
2733 }
2734 } // end add_previous_version()
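// Illustrative picture of the structure built above (hypothetical scenario, not
// from the original source): after class C has been redefined twice,
// _previous_versions holds two PreviousVersionNodes; each node keeps a JNI handle
// (weak unless the constant pool is shared) to that version's constantPool plus a
// GrowableArray of weak refs to its EMCP methods, so versions that become
// unreachable can simply be GC'ed and pruned on the next add.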
2737 // Determine if instanceKlass has a previous version.
2738 bool instanceKlass::has_previous_version() const {
2739 if (_previous_versions == NULL) {
2740 // no previous versions array so answer is easy
2741 return false;
2742 }
2744 for (int i = _previous_versions->length() - 1; i >= 0; i--) {
2745 // Check the previous versions array for an info node that hasn't
2746 // been GC'ed
2747 PreviousVersionNode * pv_node = _previous_versions->at(i);
2749 jobject cp_ref = pv_node->prev_constant_pool();
2750 assert(cp_ref != NULL, "cp reference was unexpectedly cleared");
2751 if (cp_ref == NULL) {
2752 continue; // robustness
2753 }
2755 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2756 if (cp != NULL) {
2757 // we have at least one previous version
2758 return true;
2759 }
2761 // We don't have to check the method refs. If the constant pool has
2762 // been GC'ed then so have the methods.
2763 }
2765 // all of the underlying nodes' info has been GC'ed
2766 return false;
2767 } // end has_previous_version()
2769 methodOop instanceKlass::method_with_idnum(int idnum) {
2770 methodOop m = NULL;
2771 if (idnum < methods()->length()) {
2772 m = (methodOop) methods()->obj_at(idnum);
2773 }
2774 if (m == NULL || m->method_idnum() != idnum) {
2775 for (int index = 0; index < methods()->length(); ++index) {
2776 m = (methodOop) methods()->obj_at(index);
2777 if (m->method_idnum() == idnum) {
2778 return m;
2779 }
2780 }
2781 }
2782 return m;
2783 }
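// Hedged note on the lookup above (the reason given is an assumption): the direct
// probe methods()->obj_at(idnum) is only a fast path; after RedefineClasses the
// methods array can be reordered or resized, so the linear scan by method_idnum()
// is the authoritative lookup.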
2786 // Set the annotation at 'idnum' to 'anno'.
2787 // We don't want to create or extend the array if 'anno' is NULL, since that is the
2788 // default value. However, if the array exists and is long enough, we must set NULL values.
2789 void instanceKlass::set_methods_annotations_of(int idnum, typeArrayOop anno, objArrayOop* md_p) {
2790 objArrayOop md = *md_p;
2791 if (md != NULL && md->length() > idnum) {
2792 md->obj_at_put(idnum, anno);
2793 } else if (anno != NULL) {
2794 // create the array
2795 int length = MAX2(idnum+1, (int)_idnum_allocated_count);
2796 md = oopFactory::new_system_objArray(length, Thread::current());
2797 if (*md_p != NULL) {
2798 // copy the existing entries
2799 for (int index = 0; index < (*md_p)->length(); index++) {
2800 md->obj_at_put(index, (*md_p)->obj_at(index));
2801 }
2802 }
2803 set_annotations(md, md_p);
2804 md->obj_at_put(idnum, anno);
2805 } // if there is no array and idnum isn't covered, there is nothing to do
2806 }
2808 // Construct a PreviousVersionNode entry for the array hung off
2809 // the instanceKlass.
2810 PreviousVersionNode::PreviousVersionNode(jobject prev_constant_pool,
2811 bool prev_cp_is_weak, GrowableArray<jweak>* prev_EMCP_methods) {
2813 _prev_constant_pool = prev_constant_pool;
2814 _prev_cp_is_weak = prev_cp_is_weak;
2815 _prev_EMCP_methods = prev_EMCP_methods;
2816 }
2819 // Destroy a PreviousVersionNode
2820 PreviousVersionNode::~PreviousVersionNode() {
2821 if (_prev_constant_pool != NULL) {
2822 if (_prev_cp_is_weak) {
2823 JNIHandles::destroy_weak_global(_prev_constant_pool);
2824 } else {
2825 JNIHandles::destroy_global(_prev_constant_pool);
2826 }
2827 }
2829 if (_prev_EMCP_methods != NULL) {
2830 for (int i = _prev_EMCP_methods->length() - 1; i >= 0; i--) {
2831 jweak method_ref = _prev_EMCP_methods->at(i);
2832 if (method_ref != NULL) {
2833 JNIHandles::destroy_weak_global(method_ref);
2834 }
2835 }
2836 delete _prev_EMCP_methods;
2837 }
2838 }
2841 // Construct a PreviousVersionInfo entry
2842 PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
2843 _prev_constant_pool_handle = constantPoolHandle(); // NULL handle
2844 _prev_EMCP_method_handles = NULL;
2846 jobject cp_ref = pv_node->prev_constant_pool();
2847 assert(cp_ref != NULL, "constant pool ref was unexpectedly cleared");
2848 if (cp_ref == NULL) {
2849 return; // robustness
2850 }
2852 constantPoolOop cp = (constantPoolOop)JNIHandles::resolve(cp_ref);
2853 if (cp == NULL) {
2854 // Weak reference has been GC'ed. Since the constant pool has been
2855 // GC'ed, the methods have also been GC'ed.
2856 return;
2857 }
2859 // make the constantPoolOop safe to return
2860 _prev_constant_pool_handle = constantPoolHandle(cp);
2862 GrowableArray<jweak>* method_refs = pv_node->prev_EMCP_methods();
2863 if (method_refs == NULL) {
2864 // the instanceKlass did not have any EMCP methods
2865 return;
2866 }
2868 _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
2870 int n_methods = method_refs->length();
2871 for (int i = 0; i < n_methods; i++) {
2872 jweak method_ref = method_refs->at(i);
2873 assert(method_ref != NULL, "weak method ref was unexpectedly cleared");
2874 if (method_ref == NULL) {
2875 continue; // robustness
2876 }
2878 methodOop method = (methodOop)JNIHandles::resolve(method_ref);
2879 if (method == NULL) {
2880 // this entry has been GC'ed so skip it
2881 continue;
2882 }
2884 // make the methodOop safe to return
2885 _prev_EMCP_method_handles->append(methodHandle(method));
2886 }
2887 }
2890 // Destroy a PreviousVersionInfo
2891 PreviousVersionInfo::~PreviousVersionInfo() {
2892 // Since _prev_EMCP_method_handles is not C-heap allocated, we
2893 // don't have to delete it.
2894 }
2897 // Construct a helper for walking the previous versions array
2898 PreviousVersionWalker::PreviousVersionWalker(instanceKlass *ik) {
2899 _previous_versions = ik->previous_versions();
2900 _current_index = 0;
2901 // _hm needs no initialization
2902 _current_p = NULL;
2903 }
2906 // Destroy a PreviousVersionWalker
2907 PreviousVersionWalker::~PreviousVersionWalker() {
2908 // Delete the current info just in case the caller didn't walk to
2909 // the end of the previous versions list. No harm if _current_p is
2910 // already NULL.
2911 delete _current_p;
2913 // When _hm is destroyed, all the Handles returned in
2914 // PreviousVersionInfo objects will be destroyed.
2915 // Also, after this destructor is finished it will be
2916 // safe to delete the GrowableArray allocated in the
2917 // PreviousVersionInfo objects.
2918 }
2921 // Return the interesting information for the next previous version
2922 // of the klass. Returns NULL if there are no more previous versions.
2923 PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
2924 if (_previous_versions == NULL) {
2925 // no previous versions so nothing to return
2926 return NULL;
2927 }
2929 delete _current_p; // cleanup the previous info for the caller
2930 _current_p = NULL; // reset to NULL so we don't delete same object twice
2932 int length = _previous_versions->length();
2934 while (_current_index < length) {
2935 PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
2936 PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP)
2937 PreviousVersionInfo(pv_node);
2939 constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
2940 if (cp_h.is_null()) {
2941 delete pv_info;
2943 // The underlying node's info has been GC'ed so try the next one.
2944 // We don't have to check the methods. If the constant pool has
2945 // been GC'ed then so have the methods.
2946 continue;
2947 }
2949 // Found a node with non GC'ed info so return it. The caller will
2950 // need to delete pv_info when they are done with it.
2951 _current_p = pv_info;
2952 return pv_info;
2953 }
2955 // all of the underlying nodes' info has been GC'ed
2956 return NULL;
2957 } // end next_previous_version()