/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_CODECACHE_HPP
#define SHARE_VM_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"

// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlob's.

// Implementation:
//   - Each CodeBlob occupies one chunk of memory.
//   - Like the offset table in oldspace, the zone has a table for
//     locating a method given an address of an instruction.
aoqi@0: aoqi@0: class OopClosure; aoqi@0: class DepChange; aoqi@0: aoqi@0: class CodeCache : AllStatic { aoqi@0: friend class VMStructs; aoqi@0: private: aoqi@0: // CodeHeap is malloc()'ed at startup and never deleted during shutdown, aoqi@0: // so that the generated assembly code is always there when it's needed. aoqi@0: // This may cause memory leak, but is necessary, for now. See 4423824, aoqi@0: // 4422213 or 4436291 for details. aoqi@0: static CodeHeap * _heap; aoqi@0: static int _number_of_blobs; aoqi@0: static int _number_of_adapters; aoqi@0: static int _number_of_nmethods; aoqi@0: static int _number_of_nmethods_with_dependencies; aoqi@0: static bool _needs_cache_clean; aoqi@0: static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() aoqi@0: aoqi@0: static void verify_if_often() PRODUCT_RETURN; aoqi@0: aoqi@0: static void mark_scavenge_root_nmethods() PRODUCT_RETURN; aoqi@0: static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN; aoqi@0: aoqi@0: static int _codemem_full_count; aoqi@0: aoqi@0: public: aoqi@0: aoqi@0: // Initialization aoqi@0: static void initialize(); aoqi@0: aoqi@0: static void report_codemem_full(); aoqi@0: aoqi@0: // Allocation/administration aoqi@0: static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob aoqi@0: static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled aoqi@0: static int alignment_unit(); // guaranteed alignment of all CodeBlobs aoqi@0: static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header) aoqi@0: static void free(CodeBlob* cb); // frees a CodeBlob aoqi@0: static void flush(); // flushes all CodeBlobs aoqi@0: static bool contains(void *p); // returns whether p is included aoqi@0: static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs aoqi@0: static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs aoqi@0: static 
void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods aoqi@0: static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods aoqi@0: aoqi@0: // Lookup aoqi@0: static CodeBlob* find_blob(void* start); aoqi@0: static nmethod* find_nmethod(void* start); aoqi@0: aoqi@0: // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know aoqi@0: // what you are doing) aoqi@0: static CodeBlob* find_blob_unsafe(void* start) { aoqi@0: // NMT can walk the stack before code cache is created aoqi@0: if (_heap == NULL) return NULL; aoqi@0: aoqi@0: CodeBlob* result = (CodeBlob*)_heap->find_start(start); aoqi@0: // this assert is too strong because the heap code will return the aoqi@0: // heapblock containing start. That block can often be larger than aoqi@0: // the codeBlob itself. If you look up an address that is within aoqi@0: // the heapblock but not in the codeBlob you will assert. aoqi@0: // aoqi@0: // Most things will not lookup such bad addresses. However aoqi@0: // AsyncGetCallTrace can see intermediate frames and get that kind aoqi@0: // of invalid address and so can a developer using hsfind. aoqi@0: // aoqi@0: // The more correct answer is to return NULL if blob_contains() returns aoqi@0: // false. 
aoqi@0: // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob"); aoqi@0: aoqi@0: if (result != NULL && !result->blob_contains((address)start)) { aoqi@0: result = NULL; aoqi@0: } aoqi@0: return result; aoqi@0: } aoqi@0: aoqi@0: // Iteration aoqi@0: static CodeBlob* first(); aoqi@0: static CodeBlob* next (CodeBlob* cb); aoqi@0: static CodeBlob* alive(CodeBlob *cb); aoqi@0: static nmethod* alive_nmethod(CodeBlob *cb); aoqi@0: static nmethod* first_nmethod(); aoqi@0: static nmethod* next_nmethod (CodeBlob* cb); aoqi@0: static int nof_blobs() { return _number_of_blobs; } aoqi@0: static int nof_adapters() { return _number_of_adapters; } aoqi@0: static int nof_nmethods() { return _number_of_nmethods; } aoqi@0: aoqi@0: // GC support aoqi@0: static void gc_epilogue(); aoqi@0: static void gc_prologue(); aoqi@0: static void verify_oops(); aoqi@0: // If "unloading_occurred" is true, then unloads (i.e., breaks root links aoqi@0: // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading" aoqi@0: // to "true" iff some code got unloaded. 
aoqi@0: static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred); aoqi@0: static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN; aoqi@0: static void scavenge_root_nmethods_do(CodeBlobClosure* f); aoqi@0: aoqi@0: static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; } aoqi@0: static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; } aoqi@0: static void add_scavenge_root_nmethod(nmethod* nm); aoqi@0: static void drop_scavenge_root_nmethod(nmethod* nm); aoqi@0: static void prune_scavenge_root_nmethods(); aoqi@0: aoqi@0: // Printing/debugging aoqi@0: static void print(); // prints summary aoqi@0: static void print_internals(); aoqi@0: static void verify(); // verifies the code cache aoqi@0: static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN; aoqi@0: static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage aoqi@0: static void log_state(outputStream* st); aoqi@0: aoqi@0: // The full limits of the codeCache aoqi@0: static address low_bound() { return (address) _heap->low_boundary(); } aoqi@0: static address high_bound() { return (address) _heap->high_boundary(); } aoqi@0: static address high() { return (address) _heap->high(); } aoqi@0: aoqi@0: // Profiling aoqi@0: static address first_address(); // first address used for CodeBlobs aoqi@0: static address last_address(); // last address used for CodeBlobs aoqi@0: static size_t capacity() { return _heap->capacity(); } aoqi@0: static size_t max_capacity() { return _heap->max_capacity(); } aoqi@0: static size_t unallocated_capacity() { return _heap->unallocated_capacity(); } aoqi@0: static double reverse_free_ratio(); aoqi@0: aoqi@0: static bool needs_cache_clean() { return _needs_cache_clean; } aoqi@0: static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } aoqi@0: static void clear_inline_caches(); // clear all 
inline caches aoqi@0: stefank@6992: static void verify_clean_inline_caches(); stefank@6992: static void verify_icholder_relocations(); stefank@6992: aoqi@0: // Deoptimization aoqi@0: static int mark_for_deoptimization(DepChange& changes); aoqi@0: #ifdef HOTSWAP aoqi@0: static int mark_for_evol_deoptimization(instanceKlassHandle dependee); aoqi@0: #endif // HOTSWAP aoqi@0: aoqi@0: static void mark_all_nmethods_for_deoptimization(); aoqi@0: static int mark_for_deoptimization(Method* dependee); aoqi@0: static void make_marked_nmethods_not_entrant(); aoqi@0: aoqi@0: // tells how many nmethods have dependencies aoqi@0: static int number_of_nmethods_with_dependencies(); aoqi@0: aoqi@0: static int get_codemem_full_count() { return _codemem_full_count; } aoqi@0: }; aoqi@0: aoqi@0: #endif // SHARE_VM_CODE_CODECACHE_HPP