/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}

CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}

CodeBlob* CodeCache::alive(CodeBlob *cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}

nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

static size_t maxCodeCacheUsed = 0;

CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
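  //
  // For illustration only (a sketch of the expected pattern, not a requirement
  // imposed here): blob creation helpers such as BufferBlob::create() typically
  // hold the lock across both the allocation and the placement construction,
  // roughly like
  //
  //   BufferBlob* blob = NULL;
  //   {
  //     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  //     blob = new (size) BufferBlob(name, size);  // operator new ends up in CodeCache::allocate()
  //   }
  //
  // so that no partially constructed blob is ever visible outside the lock.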
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
                    (address)_heap->high() - (address)_heap->low_boundary());
    }
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}

// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)          for (CodeBlob *var = first(); var != NULL; var = next(var))
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
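//
// A typical lock-free use (an illustrative sketch only) is mapping a return pc
// back to the blob that contains it while walking a stack:
//
//   CodeBlob* cb = CodeCache::find_blob(return_address);
//   if (cb != NULL && cb->is_nmethod()) {
//     // return_address is inside compiled code
//   }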
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}
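// The scavenge-root list is threaded through the nmethods themselves:
// _scavenge_root_nmethods points at the head and each element links to its
// successor via scavenge_root_link(), so a walk is simply
//
//   for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
//     ...
//   }
//
// add_scavenge_root_nmethod() pushes onto the head of this list and
// drop_scavenge_root_nmethod() unlinks one element; both are no-ops under G1,
// which keeps track of these roots differently (hence the UseG1GC early
// returns throughout this file).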
void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods is gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f) f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      nm->verify_clean_inline_caches();
      nm->verify();
    }
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      count += nm->verify_icholder_relocations();
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  NOT_DEBUG(if (needs_cache_clean())) {
    FOR_ALL_ALIVE_BLOBS(cb) {
      if (cb->is_nmethod()) {
        nmethod *nm = (nmethod*)cb;
        assert(!nm->is_unloaded(), "Tautology");
        DEBUG_ONLY(if (needs_cache_clean())) {
          nm->cleanup_inline_caches();
        }
        DEBUG_ONLY(nm->verify());
        DEBUG_ONLY(nm->verify_oop_relocations());
      }
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}

address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}

address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
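// A quick illustration with made-up numbers: if max_capacity() is 48M, 12M of
// that is still unallocated, and CodeCacheMinimumFreeSpace is assumed to be at
// a 500K default, the result is 48M / (12M - 0.5M), i.e. roughly 4.2 rather
// than exactly 4, because the minimum free space is held back first.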
double CodeCache::reverse_free_ratio() {
  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)CodeCache::max_capacity();
  return max_capacity / unallocated_capacity;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment that would cause a failure;
  // instead, round the code cache sizes to the page size. In particular, Solaris
  // is moving to a larger default page size.
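  // For example (illustrative sizes only): with a 4K page size a 32K
  // CodeCacheExpansionSize is already page aligned and is left unchanged by
  // round_to() below, while on a platform using 64K pages the same 32K value
  // would be rounded up to 64K.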
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      Klass* d = str.klass();
      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP

// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}
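// Marking is only the first half of the protocol: callers typically follow it
// with a safepoint operation that deoptimizes the affected frames and then
// invokes make_marked_nmethods_not_entrant() below. A rough sketch of the
// usual sequence (the VM_Deoptimize operation lives outside this file):
//
//   CodeCache::mark_for_deoptimization(dependee);  // or one of the variants above
//   VM_Deoptimize op;
//   VMThread::execute(&op);  // deoptimizes dependent frames, then makes marked nmethods not entrant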
void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

void CodeCache::report_codemem_full() {
  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_startAddress((u8)low_bound());
    event.set_commitedTopAddress((u8)high());
    event.set_reservedTopAddress((u8)high_bound());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char *method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if(nm->is_alive()) { tty->print_cr(" alive"); }
        if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if(nm->is_zombie()) { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if(nm->is_alive()) { nmethodAlive++; }
      if(nm->is_not_entrant()) { nmethodNotEntrant++; }
      if(nm->is_zombie()) { nmethodZombie++; }
      if(nm->is_unloaded()) { nmethodUnloaded++; }
      if(nm->is_native_method()) { nmethodNative++; }

      if(nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }
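
  // The code below builds a simple histogram of nmethod code sizes in
  // bucketSize-byte (512-byte) bins: for example, an nmethod whose insts_size()
  // is 1300 bytes lands in bucket 1300 / 512 = 2, which is printed further down
  // as the "1024 - 1536 bytes" bin.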
  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if(nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)",total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d",nmethodCount);
  tty->print_cr("\talive: %d",nmethodAlive);
  tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
  tty->print_cr("\tzombie: %d",nmethodZombie);
  tty->print_cr("\tunloaded: %d",nmethodUnloaded);
  tty->print_cr("\tjava: %d",nmethodJava);
  tty->print_cr("\tnative: %d",nmethodNative);
  tty->print_cr("runtime_stubs: %d",runtimeStubCount);
  tty->print_cr("adapters: %d",adapterCount);
  tty->print_cr("buffer blobs: %d",bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d",buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");

  tty->print_cr("nmethod dependency checking time %f, per dependent %f", dependentCheckTime.seconds(),
                dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  size_t total = (_heap->high_boundary() - _heap->low_boundary());
  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
               total/K, (total - unallocated_capacity())/K,
               maxCodeCacheUsed/K, unallocated_capacity()/K);

  if (detailed) {
    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                 p2i(_heap->low_boundary()),
                 p2i(_heap->high()),
                 p2i(_heap->high_boundary()));
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}