Tue, 15 Sep 2009 21:53:47 -0700
6863023: need non-perm oops in code cache for JSR 292
Summary: Make a special root-list for those few nmethods which might contain non-perm oops.
Reviewed-by: twisti, kvn, never, jmasa, ysr

/*
 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_codeCache.cpp.incl"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size      * 100 / total_size,
                  relocation_size  * 100 / total_size,
                  code_size        * 100 / total_size,
                  stub_size        * 100 / total_size,
                  scopes_oop_size  * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size  * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size       += cb->size();
    header_size      += cb->header_size();
    relocation_size  += cb->relocation_size();
    scopes_oop_size  += cb->oops_size();
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      code_size        += nm->code_size();
      stub_size        += nm->stub_size();

      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size        += cb->instructions_size();
    }
  }
};

// CodeCache implementation

CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
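
// _scavenge_root_nmethods is the head of the special root list added by this
// change (6863023): the few nmethods which might contain non-perm (scavengable)
// oops. Keeping them on a side list lets scavenges visit just these methods
// instead of walking the whole code cache.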

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob *cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

CodeBlob* CodeCache::allocate(int size) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
                    (address)_heap->end() - (address)_heap->begin());
    }
  }
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
    _number_of_nmethods_with_dependencies--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) {
    _number_of_nmethods_with_dependencies++;
  }
  // flush the hardware I-cache
  ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}

// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)          for (CodeBlob *var =       first() ; var != NULL; var =       next(var) )
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
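
// For example, a typical walk over every live nmethod looks like this
// (the caller must hold CodeCache_lock or be at a safepoint, per the
// asserts in first()/next()):
//
//   FOR_ALL_ALIVE_NMETHODS(nm) {
//     nm->clear_inline_caches();
//   }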

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}


int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}

// Mark code blobs for unloading if they contain otherwise
// unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    cb->do_unloading(is_alive, keep_alive, unloading_occurred);
  }
}
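
// Apply the given closure to every alive CodeBlob; in debug builds, also
// verify each nmethod's scavenge-root invariants along the way.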
void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live)
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}
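
// Unlink the given nmethod from the scavenge-root list. The list is not
// sorted, so this is a linear scan; the nmethod must actually be on the
// list (asserted at the end).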
void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
        last->set_scavenge_root_link(next);
      else
        set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}
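
// Walk the scavenge-root list and unlink entries which are dead or which no
// longer contain any scavengable oops, so later scans of the list stay cheap
// (called from gc_epilogue).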
void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
        last->set_scavenge_root_link(next);
      else
        set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT

void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f) f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT

void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}


void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      debug_only(nm->verify();)
    }
    cb->fix_oop_relocations();
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
}

address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->begin();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->end();
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, which could fail;
  // instead, round the code cache sizes up to the page size. In particular,
  // Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      klassOop d = str.klass();
      number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  objArrayOop old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    methodOop old_method = (methodOop) old_methods->obj_at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined methodOop
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP

// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}
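
// Mark for deoptimization every alive nmethod which records a dependency
// on the given method (for example, because it inlined it).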
int CodeCache::mark_for_deoptimization(methodOop dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}
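
// Verify the underlying code heap and then each alive blob in the cache.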
void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char *method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if(nm->is_alive()) { tty->print_cr(" alive"); }
        if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if(nm->is_zombie()) { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if(nm->is_alive()) { nmethodAlive++; }
      if(nm->is_not_entrant()) { nmethodNotEntrant++; }
      if(nm->is_zombie()) { nmethodZombie++; }
      if(nm->is_unloaded()) { nmethodUnloaded++; }
      if(nm->is_native_method()) { nmethodNative++; }

      if(nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if(nm->code_size() > maxCodeSize) {
          maxCodeSize = nm->code_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if(nm->is_java_method()) {
        buckets[nm->code_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i = 0; i < bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
}

void CodeCache::print() {
  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");

  // Note: the original format string had one %f conversion but two
  // arguments; print both the total and the per-check average, guarding
  // against division by zero.
  tty->print_cr("nmethod dependency checking time %f (%f per check)",
                dependentCheckTime.seconds(),
                dependentCheckCount == 0 ? 0.0
                    : dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (Verbose) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->instructions_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

}

#endif // PRODUCT