/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_CODECACHE_HPP
#define SHARE_VM_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"

// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled Java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlobs.

// Implementation:
//   - Each CodeBlob occupies one chunk of memory.
//   - Like the offset table in oldspace, the zone has a table for
//     locating a method given the address of an instruction.
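// Usage sketch (illustrative only, not part of the VM sources): the
// instruction-address-to-blob table above is what makes pc lookups such as
// the following possible, e.g. from a profiler or stack walker:
//
//   address pc = ...;                         // some generated-code address
//   CodeBlob* cb = CodeCache::find_blob(pc);  // NULL if pc is not in the cache
//   if (cb != NULL && cb->is_nmethod()) {
//     nmethod* nm = (nmethod*)cb;             // pc belongs to a compiled Java method
//   }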
class OopClosure;
class DepChange;

class CodeCache : AllStatic {
  friend class VMStructs;
 private:
  // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
  // so that the generated assembly code is always there when it's needed.
  // This may cause a memory leak, but is necessary, for now. See 4423824,
  // 4422213 or 4436291 for details.
  static CodeHeap* _heap;
  static int  _number_of_blobs;
  static int  _number_of_adapters;
  static int  _number_of_nmethods;
  static int  _number_of_nmethods_with_dependencies;
  static bool _needs_cache_clean;
  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
  static nmethod* _saved_nmethods;          // linked via nm->saved_nmethod_link()

  static void verify_if_often() PRODUCT_RETURN;

  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;

 public:

  // Initialization
  static void initialize();

  // Allocation/administration
  static CodeBlob* allocate(int size);          // allocates a new CodeBlob
  static void commit(CodeBlob* cb);             // called when the allocated CodeBlob has been filled
  static int  alignment_unit();                 // guaranteed alignment of all CodeBlobs
  static int  alignment_offset();               // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
  static void free(CodeBlob* cb);               // frees a CodeBlob
  static void flush();                          // flushes all CodeBlobs
  static bool contains(void* p);                // returns whether p is included
  static void blobs_do(void f(CodeBlob* cb));   // iterates over all CodeBlobs
  static void blobs_do(CodeBlobClosure* f);     // iterates over all CodeBlobs
  static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods

  // Lookup
  static CodeBlob* find_blob(void* start);
  static nmethod*  find_nmethod(void* start);

  // Lookup that does not fail if you look up a zombie method (if you call
  // this, be sure you know what you are doing).
  static CodeBlob* find_blob_unsafe(void* start) {
    CodeBlob* result = (CodeBlob*)_heap->find_start(start);
    // The assert below is too strong because the heap code will return the
    // heapblock containing start. That block can often be larger than the
    // CodeBlob itself. If you look up an address that is within the
    // heapblock but not in the CodeBlob, you will assert.
    //
    // Most things will not look up such bad addresses. However,
    // AsyncGetCallTrace can see intermediate frames and get that kind of
    // invalid address, and so can a developer using hsfind.
    //
    // The more correct answer is to return NULL if blob_contains() returns
    // false.
    // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");

    if (result != NULL && !result->blob_contains((address)start)) {
      result = NULL;
    }
    return result;
  }
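  // Sketch (illustrative only) of the kind of caller find_blob_unsafe()
  // exists for: a profiler walking frames asynchronously may hold a pc that
  // falls inside a heap block's slack but outside any CodeBlob:
  //
  //   CodeBlob* cb = CodeCache::find_blob_unsafe(suspect_pc);
  //   if (cb == NULL) {
  //     // suspect_pc is not inside any CodeBlob's contents; treat the
  //     // frame as unwalkable instead of asserting (see comment above).
  //   }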
  // Iteration
  static CodeBlob* first();
  static CodeBlob* next(CodeBlob* cb);
  static CodeBlob* alive(CodeBlob* cb);
  static nmethod*  alive_nmethod(CodeBlob* cb);
  static nmethod*  first_nmethod();
  static nmethod*  next_nmethod(CodeBlob* cb);
  static int nof_blobs()    { return _number_of_blobs; }
  static int nof_adapters() { return _number_of_adapters; }
  static int nof_nmethods() { return _number_of_nmethods; }

  // GC support
  static void gc_epilogue();
  static void gc_prologue();
  // If "unloading_occurred" is true, then unloads (i.e., breaks root links
  // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
  // to "true" iff some code got unloaded.
  static void do_unloading(BoolObjectClosure* is_alive,
                           OopClosure* keep_alive,
                           bool unloading_occurred);
  static void oops_do(OopClosure* f) {
    CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
    blobs_do(&oopc);
  }
  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
  static void scavenge_root_nmethods_do(CodeBlobClosure* f);

  static nmethod* scavenge_root_nmethods()            { return _scavenge_root_nmethods; }
  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
  static void add_scavenge_root_nmethod(nmethod* nm);
  static void drop_scavenge_root_nmethod(nmethod* nm);
  static void prune_scavenge_root_nmethods();
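  // GC usage sketch (illustrative only; mirrors the inline oops_do() above):
  // a collector that must visit every oop embedded in generated code can
  // supply its own OopClosure, which oops_do() wraps in a CodeBlobToOopClosure
  // and applies to every blob:
  //
  //   class VisitEmbeddedOops : public OopClosure { ... };  // hypothetical closure
  //   VisitEmbeddedOops v;
  //   CodeCache::oops_do(&v);
  //
  // Young collectors that only care about nmethods holding scavengable oops
  // can instead walk the much shorter _scavenge_root_nmethods list via
  // scavenge_root_nmethods_do(), avoiding a full code cache scan.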
  // Printing/debugging
  static void print()   PRODUCT_RETURN;          // prints summary
  static void print_internals();
  static void verify();                          // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
  static void print_bounds(outputStream* st);    // prints a summary of the bounds of the code cache

  // The full limits of the codeCache
  static address low_bound()  { return (address) _heap->low_boundary(); }
  static address high_bound() { return (address) _heap->high_boundary(); }

  // Profiling
  static address first_address();                // first address used for CodeBlobs
  static address last_address();                 // last address used for CodeBlobs
  static size_t capacity()             { return _heap->capacity(); }
  static size_t max_capacity()         { return _heap->max_capacity(); }
  static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
  static size_t largest_free_block()   { return _heap->largest_free_block(); }
  static bool   needs_flushing()       { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }

  static bool needs_cache_clean()           { return _needs_cache_clean; }
  static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
  static void clear_inline_caches();        // clear all inline caches

  static nmethod* find_and_remove_saved_code(methodOop m);
  static void remove_saved_code(nmethod* nm);
  static void speculatively_disconnect(nmethod* nm);

  // Deoptimization
  static int  mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
  static int  mark_for_evol_deoptimization(instanceKlassHandle dependee);
#endif // HOTSWAP

  static void mark_all_nmethods_for_deoptimization();
  static int  mark_for_deoptimization(methodOop dependee);
  static void make_marked_nmethods_zombies();
  static void make_marked_nmethods_not_entrant();

  // Tells how many nmethods have dependencies.
  static int number_of_nmethods_with_dependencies();
};

#endif // SHARE_VM_CODE_CODECACHE_HPP
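// Deoptimization usage sketch (illustrative only; it mirrors callers such as
// the dependency-flushing code, which pair the marking calls above with a
// VM_Deoptimize safepoint operation):
//
//   if (CodeCache::mark_for_deoptimization(changes) > 0) {
//     // At least one nmethod has been marked for deoptimization;
//     // make the marked nmethods not entrant at a safepoint.
//     VM_Deoptimize op;
//     VMThread::execute(&op);
//   }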