1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/code/codeCache.cpp Sat Dec 01 00:00:00 2007 +0000 1.3 @@ -0,0 +1,662 @@ 1.4 +/* 1.5 + * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or 1.24 + * have any questions. 
1.25 + * 1.26 + */ 1.27 + 1.28 +# include "incls/_precompiled.incl" 1.29 +# include "incls/_codeCache.cpp.incl" 1.30 + 1.31 +// Helper class for printing in CodeCache 1.32 + 1.33 +class CodeBlob_sizes { 1.34 + private: 1.35 + int count; 1.36 + int total_size; 1.37 + int header_size; 1.38 + int code_size; 1.39 + int stub_size; 1.40 + int relocation_size; 1.41 + int scopes_oop_size; 1.42 + int scopes_data_size; 1.43 + int scopes_pcs_size; 1.44 + 1.45 + public: 1.46 + CodeBlob_sizes() { 1.47 + count = 0; 1.48 + total_size = 0; 1.49 + header_size = 0; 1.50 + code_size = 0; 1.51 + stub_size = 0; 1.52 + relocation_size = 0; 1.53 + scopes_oop_size = 0; 1.54 + scopes_data_size = 0; 1.55 + scopes_pcs_size = 0; 1.56 + } 1.57 + 1.58 + int total() { return total_size; } 1.59 + bool is_empty() { return count == 0; } 1.60 + 1.61 + void print(const char* title) { 1.62 + tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])", 1.63 + count, 1.64 + title, 1.65 + total() / K, 1.66 + header_size * 100 / total_size, 1.67 + relocation_size * 100 / total_size, 1.68 + code_size * 100 / total_size, 1.69 + stub_size * 100 / total_size, 1.70 + scopes_oop_size * 100 / total_size, 1.71 + scopes_data_size * 100 / total_size, 1.72 + scopes_pcs_size * 100 / total_size); 1.73 + } 1.74 + 1.75 + void add(CodeBlob* cb) { 1.76 + count++; 1.77 + total_size += cb->size(); 1.78 + header_size += cb->header_size(); 1.79 + relocation_size += cb->relocation_size(); 1.80 + scopes_oop_size += cb->oops_size(); 1.81 + if (cb->is_nmethod()) { 1.82 + nmethod *nm = (nmethod*)cb; 1.83 + code_size += nm->code_size(); 1.84 + stub_size += nm->stub_size(); 1.85 + 1.86 + scopes_data_size += nm->scopes_data_size(); 1.87 + scopes_pcs_size += nm->scopes_pcs_size(); 1.88 + } else { 1.89 + code_size += cb->instructions_size(); 1.90 + } 1.91 + } 1.92 +}; 1.93 + 1.94 + 1.95 +// CodeCache implementation 1.96 + 1.97 +CodeHeap * CodeCache::_heap = new CodeHeap(); 1.98 +int 
CodeCache::_number_of_blobs = 0; 1.99 +int CodeCache::_number_of_nmethods_with_dependencies = 0; 1.100 +bool CodeCache::_needs_cache_clean = false; 1.101 + 1.102 + 1.103 +CodeBlob* CodeCache::first() { 1.104 + assert_locked_or_safepoint(CodeCache_lock); 1.105 + return (CodeBlob*)_heap->first(); 1.106 +} 1.107 + 1.108 + 1.109 +CodeBlob* CodeCache::next(CodeBlob* cb) { 1.110 + assert_locked_or_safepoint(CodeCache_lock); 1.111 + return (CodeBlob*)_heap->next(cb); 1.112 +} 1.113 + 1.114 + 1.115 +CodeBlob* CodeCache::alive(CodeBlob *cb) { 1.116 + assert_locked_or_safepoint(CodeCache_lock); 1.117 + while (cb != NULL && !cb->is_alive()) cb = next(cb); 1.118 + return cb; 1.119 +} 1.120 + 1.121 + 1.122 +nmethod* CodeCache::alive_nmethod(CodeBlob* cb) { 1.123 + assert_locked_or_safepoint(CodeCache_lock); 1.124 + while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb); 1.125 + return (nmethod*)cb; 1.126 +} 1.127 + 1.128 + 1.129 +CodeBlob* CodeCache::allocate(int size) { 1.130 + // Do not seize the CodeCache lock here--if the caller has not 1.131 + // already done so, we are going to lose bigtime, since the code 1.132 + // cache will contain a garbage CodeBlob until the caller can 1.133 + // run the constructor for the CodeBlob subclass he is busy 1.134 + // instantiating. 
1.135 + guarantee(size >= 0, "allocation request must be reasonable"); 1.136 + assert_locked_or_safepoint(CodeCache_lock); 1.137 + CodeBlob* cb = NULL; 1.138 + _number_of_blobs++; 1.139 + while (true) { 1.140 + cb = (CodeBlob*)_heap->allocate(size); 1.141 + if (cb != NULL) break; 1.142 + if (!_heap->expand_by(CodeCacheExpansionSize)) { 1.143 + // Expansion failed 1.144 + return NULL; 1.145 + } 1.146 + if (PrintCodeCacheExtension) { 1.147 + ResourceMark rm; 1.148 + tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)", 1.149 + (intptr_t)_heap->begin(), (intptr_t)_heap->end(), 1.150 + (address)_heap->end() - (address)_heap->begin()); 1.151 + } 1.152 + } 1.153 + verify_if_often(); 1.154 + if (PrintCodeCache2) { // Need to add a new flag 1.155 + ResourceMark rm; 1.156 + tty->print_cr("CodeCache allocation: addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, size); 1.157 + } 1.158 + return cb; 1.159 +} 1.160 + 1.161 +void CodeCache::free(CodeBlob* cb) { 1.162 + assert_locked_or_safepoint(CodeCache_lock); 1.163 + verify_if_often(); 1.164 + 1.165 + if (PrintCodeCache2) { // Need to add a new flag 1.166 + ResourceMark rm; 1.167 + tty->print_cr("CodeCache free: addr: " INTPTR_FORMAT ", size: 0x%x\n", cb, cb->size()); 1.168 + } 1.169 + if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) { 1.170 + _number_of_nmethods_with_dependencies--; 1.171 + } 1.172 + _number_of_blobs--; 1.173 + 1.174 + _heap->deallocate(cb); 1.175 + 1.176 + verify_if_often(); 1.177 + assert(_number_of_blobs >= 0, "sanity check"); 1.178 +} 1.179 + 1.180 + 1.181 +void CodeCache::commit(CodeBlob* cb) { 1.182 + // this is called by nmethod::nmethod, which must already own CodeCache_lock 1.183 + assert_locked_or_safepoint(CodeCache_lock); 1.184 + if (cb->is_nmethod() && ((nmethod *)cb)->has_dependencies()) { 1.185 + _number_of_nmethods_with_dependencies++; 1.186 + } 1.187 + // flush the hardware I-cache 1.188 + ICache::invalidate_range(cb->instructions_begin(), 
cb->instructions_size()); 1.189 +} 1.190 + 1.191 + 1.192 +void CodeCache::flush() { 1.193 + assert_locked_or_safepoint(CodeCache_lock); 1.194 + Unimplemented(); 1.195 +} 1.196 + 1.197 + 1.198 +// Iteration over CodeBlobs 1.199 + 1.200 +#define FOR_ALL_BLOBS(var) for (CodeBlob *var = first() ; var != NULL; var = next(var) ) 1.201 +#define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var))) 1.202 +#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var))) 1.203 + 1.204 + 1.205 +bool CodeCache::contains(void *p) { 1.206 + // It should be ok to call contains without holding a lock 1.207 + return _heap->contains(p); 1.208 +} 1.209 + 1.210 + 1.211 +// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not 1.212 +// looked up (i.e., one that has been marked for deletion). It only dependes on the _segmap to contain 1.213 +// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled. 
// Look up the CodeBlob containing the address 'start'. Returns NULL if no
// blob contains the address. Guarantees the result is safe to use (not a
// zombie) unless the VM holds a lock on it or an error is being reported.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

// Like find_blob, but the caller asserts the blob is an nmethod.
// NOTE(review): in product builds a non-nmethod blob would be cast
// unchecked — callers are expected to pass only nmethod addresses.
nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


// Apply f to every blob in the cache, dead or alive.
void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


// Apply f to every nmethod in the cache (including non-alive ones).
void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}


int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark code blobs for unloading if they contain otherwise
// unreachable oops.
1.257 +void CodeCache::do_unloading(BoolObjectClosure* is_alive, 1.258 + OopClosure* keep_alive, 1.259 + bool unloading_occurred) { 1.260 + assert_locked_or_safepoint(CodeCache_lock); 1.261 + FOR_ALL_ALIVE_BLOBS(cb) { 1.262 + cb->do_unloading(is_alive, keep_alive, unloading_occurred); 1.263 + } 1.264 +} 1.265 + 1.266 +void CodeCache::oops_do(OopClosure* f) { 1.267 + assert_locked_or_safepoint(CodeCache_lock); 1.268 + FOR_ALL_ALIVE_BLOBS(cb) { 1.269 + cb->oops_do(f); 1.270 + } 1.271 +} 1.272 + 1.273 +void CodeCache::gc_prologue() { 1.274 +} 1.275 + 1.276 + 1.277 +void CodeCache::gc_epilogue() { 1.278 + assert_locked_or_safepoint(CodeCache_lock); 1.279 + FOR_ALL_ALIVE_BLOBS(cb) { 1.280 + if (cb->is_nmethod()) { 1.281 + nmethod *nm = (nmethod*)cb; 1.282 + assert(!nm->is_unloaded(), "Tautology"); 1.283 + if (needs_cache_clean()) { 1.284 + nm->cleanup_inline_caches(); 1.285 + } 1.286 + debug_only(nm->verify();) 1.287 + } 1.288 + cb->fix_oop_relocations(); 1.289 + } 1.290 + set_needs_cache_clean(false); 1.291 +} 1.292 + 1.293 + 1.294 +address CodeCache::first_address() { 1.295 + assert_locked_or_safepoint(CodeCache_lock); 1.296 + return (address)_heap->begin(); 1.297 +} 1.298 + 1.299 + 1.300 +address CodeCache::last_address() { 1.301 + assert_locked_or_safepoint(CodeCache_lock); 1.302 + return (address)_heap->end(); 1.303 +} 1.304 + 1.305 + 1.306 +void icache_init(); 1.307 + 1.308 +void CodeCache::initialize() { 1.309 + assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points"); 1.310 +#ifdef COMPILER2 1.311 + assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops"); 1.312 +#endif 1.313 + assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants"); 1.314 + // This was originally just a check of the alignment, causing failure, instead, round 1.315 + // the code cache to the page size. 
In particular, Solaris is moving to a larger 1.316 + // default page size. 1.317 + CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size()); 1.318 + InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size()); 1.319 + ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size()); 1.320 + if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) { 1.321 + vm_exit_during_initialization("Could not reserve enough space for code cache"); 1.322 + } 1.323 + 1.324 + MemoryService::add_code_heap_memory_pool(_heap); 1.325 + 1.326 + // Initialize ICache flush mechanism 1.327 + // This service is needed for os::register_code_area 1.328 + icache_init(); 1.329 + 1.330 + // Give OS a chance to register generated code area. 1.331 + // This is used on Windows 64 bit platforms to register 1.332 + // Structured Exception Handlers for our generated code. 1.333 + os::register_code_area(_heap->low_boundary(), _heap->high_boundary()); 1.334 +} 1.335 + 1.336 + 1.337 +void codeCache_init() { 1.338 + CodeCache::initialize(); 1.339 +} 1.340 + 1.341 +//------------------------------------------------------------------------------------------------ 1.342 + 1.343 +int CodeCache::number_of_nmethods_with_dependencies() { 1.344 + return _number_of_nmethods_with_dependencies; 1.345 +} 1.346 + 1.347 +void CodeCache::clear_inline_caches() { 1.348 + assert_locked_or_safepoint(CodeCache_lock); 1.349 + FOR_ALL_ALIVE_NMETHODS(nm) { 1.350 + nm->clear_inline_caches(); 1.351 + } 1.352 +} 1.353 + 1.354 +#ifndef PRODUCT 1.355 +// used to keep track of how much time is spent in mark_for_deoptimization 1.356 +static elapsedTimer dependentCheckTime; 1.357 +static int dependentCheckCount = 0; 1.358 +#endif // PRODUCT 1.359 + 1.360 + 1.361 +int CodeCache::mark_for_deoptimization(DepChange& changes) { 1.362 + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1.363 + 1.364 +#ifndef PRODUCT 1.365 + 
dependentCheckTime.start(); 1.366 + dependentCheckCount++; 1.367 +#endif // PRODUCT 1.368 + 1.369 + int number_of_marked_CodeBlobs = 0; 1.370 + 1.371 + // search the hierarchy looking for nmethods which are affected by the loading of this class 1.372 + 1.373 + // then search the interfaces this class implements looking for nmethods 1.374 + // which might be dependent of the fact that an interface only had one 1.375 + // implementor. 1.376 + 1.377 + { No_Safepoint_Verifier nsv; 1.378 + for (DepChange::ContextStream str(changes, nsv); str.next(); ) { 1.379 + klassOop d = str.klass(); 1.380 + number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes); 1.381 + } 1.382 + } 1.383 + 1.384 + if (VerifyDependencies) { 1.385 + // Turn off dependency tracing while actually testing deps. 1.386 + NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) ); 1.387 + FOR_ALL_ALIVE_NMETHODS(nm) { 1.388 + if (!nm->is_marked_for_deoptimization() && 1.389 + nm->check_all_dependencies()) { 1.390 + ResourceMark rm; 1.391 + tty->print_cr("Should have been marked for deoptimization:"); 1.392 + changes.print(); 1.393 + nm->print(); 1.394 + nm->print_dependencies(); 1.395 + } 1.396 + } 1.397 + } 1.398 + 1.399 +#ifndef PRODUCT 1.400 + dependentCheckTime.stop(); 1.401 +#endif // PRODUCT 1.402 + 1.403 + return number_of_marked_CodeBlobs; 1.404 +} 1.405 + 1.406 + 1.407 +#ifdef HOTSWAP 1.408 +int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) { 1.409 + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1.410 + int number_of_marked_CodeBlobs = 0; 1.411 + 1.412 + // Deoptimize all methods of the evolving class itself 1.413 + objArrayOop old_methods = dependee->methods(); 1.414 + for (int i = 0; i < old_methods->length(); i++) { 1.415 + ResourceMark rm; 1.416 + methodOop old_method = (methodOop) old_methods->obj_at(i); 1.417 + nmethod *nm = old_method->code(); 1.418 + if (nm != NULL) { 1.419 + nm->mark_for_deoptimization(); 1.420 + 
number_of_marked_CodeBlobs++; 1.421 + } 1.422 + } 1.423 + 1.424 + FOR_ALL_ALIVE_NMETHODS(nm) { 1.425 + if (nm->is_marked_for_deoptimization()) { 1.426 + // ...Already marked in the previous pass; don't count it again. 1.427 + } else if (nm->is_evol_dependent_on(dependee())) { 1.428 + ResourceMark rm; 1.429 + nm->mark_for_deoptimization(); 1.430 + number_of_marked_CodeBlobs++; 1.431 + } else { 1.432 + // flush caches in case they refer to a redefined methodOop 1.433 + nm->clear_inline_caches(); 1.434 + } 1.435 + } 1.436 + 1.437 + return number_of_marked_CodeBlobs; 1.438 +} 1.439 +#endif // HOTSWAP 1.440 + 1.441 + 1.442 +// Deoptimize all methods 1.443 +void CodeCache::mark_all_nmethods_for_deoptimization() { 1.444 + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1.445 + FOR_ALL_ALIVE_NMETHODS(nm) { 1.446 + nm->mark_for_deoptimization(); 1.447 + } 1.448 +} 1.449 + 1.450 + 1.451 +int CodeCache::mark_for_deoptimization(methodOop dependee) { 1.452 + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1.453 + int number_of_marked_CodeBlobs = 0; 1.454 + 1.455 + FOR_ALL_ALIVE_NMETHODS(nm) { 1.456 + if (nm->is_dependent_on_method(dependee)) { 1.457 + ResourceMark rm; 1.458 + nm->mark_for_deoptimization(); 1.459 + number_of_marked_CodeBlobs++; 1.460 + } 1.461 + } 1.462 + 1.463 + return number_of_marked_CodeBlobs; 1.464 +} 1.465 + 1.466 +void CodeCache::make_marked_nmethods_zombies() { 1.467 + assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); 1.468 + FOR_ALL_ALIVE_NMETHODS(nm) { 1.469 + if (nm->is_marked_for_deoptimization()) { 1.470 + 1.471 + // If the nmethod has already been made non-entrant and it can be converted 1.472 + // then zombie it now. Otherwise make it non-entrant and it will eventually 1.473 + // be zombied when it is no longer seen on the stack. 
Note that the nmethod 1.474 + // might be "entrant" and not on the stack and so could be zombied immediately 1.475 + // but we can't tell because we don't track it on stack until it becomes 1.476 + // non-entrant. 1.477 + 1.478 + if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) { 1.479 + nm->make_zombie(); 1.480 + } else { 1.481 + nm->make_not_entrant(); 1.482 + } 1.483 + } 1.484 + } 1.485 +} 1.486 + 1.487 +void CodeCache::make_marked_nmethods_not_entrant() { 1.488 + assert_locked_or_safepoint(CodeCache_lock); 1.489 + FOR_ALL_ALIVE_NMETHODS(nm) { 1.490 + if (nm->is_marked_for_deoptimization()) { 1.491 + nm->make_not_entrant(); 1.492 + } 1.493 + } 1.494 +} 1.495 + 1.496 +void CodeCache::verify() { 1.497 + _heap->verify(); 1.498 + FOR_ALL_ALIVE_BLOBS(p) { 1.499 + p->verify(); 1.500 + } 1.501 +} 1.502 + 1.503 +//------------------------------------------------------------------------------------------------ 1.504 +// Non-product version 1.505 + 1.506 +#ifndef PRODUCT 1.507 + 1.508 +void CodeCache::verify_if_often() { 1.509 + if (VerifyCodeCacheOften) { 1.510 + _heap->verify(); 1.511 + } 1.512 +} 1.513 + 1.514 +void CodeCache::print_internals() { 1.515 + int nmethodCount = 0; 1.516 + int runtimeStubCount = 0; 1.517 + int adapterCount = 0; 1.518 + int deoptimizationStubCount = 0; 1.519 + int uncommonTrapStubCount = 0; 1.520 + int bufferBlobCount = 0; 1.521 + int total = 0; 1.522 + int nmethodAlive = 0; 1.523 + int nmethodNotEntrant = 0; 1.524 + int nmethodZombie = 0; 1.525 + int nmethodUnloaded = 0; 1.526 + int nmethodJava = 0; 1.527 + int nmethodNative = 0; 1.528 + int maxCodeSize = 0; 1.529 + ResourceMark rm; 1.530 + 1.531 + CodeBlob *cb; 1.532 + for (cb = first(); cb != NULL; cb = next(cb)) { 1.533 + total++; 1.534 + if (cb->is_nmethod()) { 1.535 + nmethod* nm = (nmethod*)cb; 1.536 + 1.537 + if (Verbose && nm->method() != NULL) { 1.538 + ResourceMark rm; 1.539 + char *method_name = nm->method()->name_and_sig_as_C_string(); 1.540 + tty->print("%s", 
method_name); 1.541 + if(nm->is_alive()) { tty->print_cr(" alive"); } 1.542 + if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); } 1.543 + if(nm->is_zombie()) { tty->print_cr(" zombie"); } 1.544 + } 1.545 + 1.546 + nmethodCount++; 1.547 + 1.548 + if(nm->is_alive()) { nmethodAlive++; } 1.549 + if(nm->is_not_entrant()) { nmethodNotEntrant++; } 1.550 + if(nm->is_zombie()) { nmethodZombie++; } 1.551 + if(nm->is_unloaded()) { nmethodUnloaded++; } 1.552 + if(nm->is_native_method()) { nmethodNative++; } 1.553 + 1.554 + if(nm->method() != NULL && nm->is_java_method()) { 1.555 + nmethodJava++; 1.556 + if(nm->code_size() > maxCodeSize) { 1.557 + maxCodeSize = nm->code_size(); 1.558 + } 1.559 + } 1.560 + } else if (cb->is_runtime_stub()) { 1.561 + runtimeStubCount++; 1.562 + } else if (cb->is_deoptimization_stub()) { 1.563 + deoptimizationStubCount++; 1.564 + } else if (cb->is_uncommon_trap_stub()) { 1.565 + uncommonTrapStubCount++; 1.566 + } else if (cb->is_adapter_blob()) { 1.567 + adapterCount++; 1.568 + } else if (cb->is_buffer_blob()) { 1.569 + bufferBlobCount++; 1.570 + } 1.571 + } 1.572 + 1.573 + int bucketSize = 512; 1.574 + int bucketLimit = maxCodeSize / bucketSize + 1; 1.575 + int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit); 1.576 + memset(buckets,0,sizeof(int) * bucketLimit); 1.577 + 1.578 + for (cb = first(); cb != NULL; cb = next(cb)) { 1.579 + if (cb->is_nmethod()) { 1.580 + nmethod* nm = (nmethod*)cb; 1.581 + if(nm->is_java_method()) { 1.582 + buckets[nm->code_size() / bucketSize]++; 1.583 + } 1.584 + } 1.585 + } 1.586 + tty->print_cr("Code Cache Entries (total of %d)",total); 1.587 + tty->print_cr("-------------------------------------------------"); 1.588 + tty->print_cr("nmethods: %d",nmethodCount); 1.589 + tty->print_cr("\talive: %d",nmethodAlive); 1.590 + tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant); 1.591 + tty->print_cr("\tzombie: %d",nmethodZombie); 1.592 + tty->print_cr("\tunloaded: %d",nmethodUnloaded); 1.593 + tty->print_cr("\tjava: 
%d",nmethodJava); 1.594 + tty->print_cr("\tnative: %d",nmethodNative); 1.595 + tty->print_cr("runtime_stubs: %d",runtimeStubCount); 1.596 + tty->print_cr("adapters: %d",adapterCount); 1.597 + tty->print_cr("buffer blobs: %d",bufferBlobCount); 1.598 + tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount); 1.599 + tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount); 1.600 + tty->print_cr("\nnmethod size distribution (non-zombie java)"); 1.601 + tty->print_cr("-------------------------------------------------"); 1.602 + 1.603 + for(int i=0; i<bucketLimit; i++) { 1.604 + if(buckets[i] != 0) { 1.605 + tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize); 1.606 + tty->fill_to(40); 1.607 + tty->print_cr("%d",buckets[i]); 1.608 + } 1.609 + } 1.610 + 1.611 + FREE_C_HEAP_ARRAY(int, buckets); 1.612 +} 1.613 + 1.614 +void CodeCache::print() { 1.615 + CodeBlob_sizes live; 1.616 + CodeBlob_sizes dead; 1.617 + 1.618 + FOR_ALL_BLOBS(p) { 1.619 + if (!p->is_alive()) { 1.620 + dead.add(p); 1.621 + } else { 1.622 + live.add(p); 1.623 + } 1.624 + } 1.625 + 1.626 + tty->print_cr("CodeCache:"); 1.627 + 1.628 + tty->print_cr("nmethod dependency checking time %f", dependentCheckTime.seconds(), 1.629 + dependentCheckTime.seconds() / dependentCheckCount); 1.630 + 1.631 + if (!live.is_empty()) { 1.632 + live.print("live"); 1.633 + } 1.634 + if (!dead.is_empty()) { 1.635 + dead.print("dead"); 1.636 + } 1.637 + 1.638 + 1.639 + if (Verbose) { 1.640 + // print the oop_map usage 1.641 + int code_size = 0; 1.642 + int number_of_blobs = 0; 1.643 + int number_of_oop_maps = 0; 1.644 + int map_size = 0; 1.645 + FOR_ALL_BLOBS(p) { 1.646 + if (p->is_alive()) { 1.647 + number_of_blobs++; 1.648 + code_size += p->instructions_size(); 1.649 + OopMapSet* set = p->oop_maps(); 1.650 + if (set != NULL) { 1.651 + number_of_oop_maps += set->size(); 1.652 + map_size += set->heap_size(); 1.653 + } 1.654 + } 1.655 + } 1.656 + tty->print_cr("OopMaps"); 1.657 + tty->print_cr(" #blobs = %d", 
number_of_blobs); 1.658 + tty->print_cr(" code size = %d", code_size); 1.659 + tty->print_cr(" #oop_maps = %d", number_of_oop_maps); 1.660 + tty->print_cr(" map size = %d", map_size); 1.661 + } 1.662 + 1.663 +} 1.664 + 1.665 +#endif // PRODUCT