src/share/vm/code/codeCache.hpp

Thu, 28 Jun 2012 17:03:16 -0400

author
zgu
date
Thu, 28 Jun 2012 17:03:16 -0400
changeset 3900
d2a62e0f25eb
parent 2750
6c97c830fb6f
child 4037
da91efe96a93
permissions
-rw-r--r--

6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain

duke@435 1 /*
trims@2708 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_CODE_CODECACHE_HPP
stefank@2314 26 #define SHARE_VM_CODE_CODECACHE_HPP
stefank@2314 27
stefank@2314 28 #include "code/codeBlob.hpp"
stefank@2314 29 #include "memory/allocation.hpp"
stefank@2314 30 #include "memory/heap.hpp"
stefank@2314 31 #include "oops/instanceKlass.hpp"
stefank@2314 32 #include "oops/oopsHierarchy.hpp"
stefank@2314 33
duke@435 34 // The CodeCache implements the code cache for various pieces of generated
duke@435 35 // code, e.g., compiled java methods, runtime stubs, transition frames, etc.
duke@435 36 // The entries in the CodeCache are all CodeBlob's.
duke@435 37
duke@435 38 // Implementation:
duke@435 39 // - Each CodeBlob occupies one chunk of memory.
duke@435 40 // - Like the offset table in oldspace, the zone has a table for
duke@435 41 // locating a method given an address of an instruction.
duke@435 42
duke@435 43 class OopClosure;
duke@435 44 class DepChange;
duke@435 45
// CodeCache: a static-only (AllStatic) facade over the single CodeHeap that
// holds every CodeBlob the VM generates. All state is static; the class is
// never instantiated.
class CodeCache : AllStatic {
  friend class VMStructs;
 private:
  // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
  // so that the generated assembly code is always there when it's needed.
  // This may cause memory leak, but is necessary, for now. See 4423824,
  // 4422213 or 4436291 for details.
  static CodeHeap * _heap;
  static int _number_of_blobs;                      // total live CodeBlobs of any kind
  static int _number_of_adapters;                   // subset: adapter blobs
  static int _number_of_nmethods;                   // subset: compiled Java methods
  static int _number_of_nmethods_with_dependencies; // nmethods carrying dependency records
  static bool _needs_cache_clean;                   // flag: inline caches should be cleaned
  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
  // NOTE(review): original comment said "saved_nmethod_look()"; presumably the
  // accessor is saved_nmethod_link() — verify against nmethod.hpp.
  static nmethod* _saved_nmethods;          // linked via nm->saved_nmethod_link()

  static void verify_if_often() PRODUCT_RETURN;

  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;

 public:

  // Initialization
  static void initialize();

  // Allocation/administration
  static CodeBlob* allocate(int size);              // allocates a new CodeBlob
  static void commit(CodeBlob* cb);                 // called when the allocated CodeBlob has been filled
  static int alignment_unit();                      // guaranteed alignment of all CodeBlobs
  static int alignment_offset();                    // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
  static void free(CodeBlob* cb);                   // frees a CodeBlob
  static void flush();                              // flushes all CodeBlobs
  static bool contains(void *p);                    // returns whether p is included
  static void blobs_do(void f(CodeBlob* cb));       // iterates over all CodeBlobs
  static void blobs_do(CodeBlobClosure* f);         // iterates over all CodeBlobs
  static void nmethods_do(void f(nmethod* nm));     // iterates over all nmethods

  // Lookup
  static CodeBlob* find_blob(void* start);
  static nmethod*  find_nmethod(void* start);

  // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
  // what you are doing)
  static CodeBlob* find_blob_unsafe(void* start) {
    // NMT (Native Memory Tracking) can walk the stack before the code cache
    // is created, so tolerate a NULL heap instead of dereferencing it.
    if (_heap == NULL) return NULL;

    CodeBlob* result = (CodeBlob*)_heap->find_start(start);
    // this assert is too strong because the heap code will return the
    // heapblock containing start. That block can often be larger than
    // the codeBlob itself. If you look up an address that is within
    // the heapblock but not in the codeBlob you will assert.
    //
    // Most things will not lookup such bad addresses. However
    // AsyncGetCallTrace can see intermediate frames and get that kind
    // of invalid address and so can a developer using hsfind.
    //
    // The more correct answer is to return NULL if blob_contains() returns
    // false.
    // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");

    if (result != NULL && !result->blob_contains((address)start)) {
      result = NULL;
    }
    return result;
  }

  // Iteration
  static CodeBlob* first();
  static CodeBlob* next (CodeBlob* cb);
  static CodeBlob* alive(CodeBlob *cb);
  static nmethod* alive_nmethod(CodeBlob *cb);
  static nmethod* first_nmethod();
  static nmethod* next_nmethod (CodeBlob* cb);
  static int nof_blobs()                 { return _number_of_blobs; }
  static int nof_adapters()              { return _number_of_adapters; }
  static int nof_nmethods()              { return _number_of_nmethods; }

  // GC support
  static void gc_epilogue();
  static void gc_prologue();
  static void verify_oops();
  // If "unloading_occurred" is true, then unloads (i.e., breaks root links
  // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
  // to "true" iff some code got unloaded.
  static void do_unloading(BoolObjectClosure* is_alive,
                           OopClosure* keep_alive,
                           bool unloading_occurred);
  // Applies f to every oop in every blob, without marking nmethods as scanned.
  static void oops_do(OopClosure* f) {
    CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
    blobs_do(&oopc);
  }
  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
  static void scavenge_root_nmethods_do(CodeBlobClosure* f);

  // Accessors for the intrusive list of nmethods that may hold scavengable oops.
  static nmethod* scavenge_root_nmethods()          { return _scavenge_root_nmethods; }
  static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
  static void add_scavenge_root_nmethod(nmethod* nm);
  static void drop_scavenge_root_nmethod(nmethod* nm);
  static void prune_scavenge_root_nmethods();

  // Printing/debugging
  static void print()   PRODUCT_RETURN;          // prints summary
  static void print_internals();
  static void verify();                          // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
  static void print_bounds(outputStream* st);    // Prints a summary of the bounds of the code cache
  static void log_state(outputStream* st);

  // The full limits of the codeCache
  static address  low_bound()                    { return (address) _heap->low_boundary(); }
  static address  high_bound()                   { return (address) _heap->high_boundary(); }

  // Profiling
  static address first_address();                // first address used for CodeBlobs
  static address last_address();                 // last  address used for CodeBlobs
  static size_t  capacity()                      { return _heap->capacity(); }
  static size_t  max_capacity()                  { return _heap->max_capacity(); }
  static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
  static size_t  largest_free_block();
  // True when compiled-code flushing should kick in (free space below threshold).
  static bool    needs_flushing()                { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; }

  static bool needs_cache_clean()                { return _needs_cache_clean; }
  static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
  static void clear_inline_caches();             // clear all inline caches

  // Support for speculative disconnection: nmethods parked on the saved list
  // can be resurrected instead of recompiled.
  static nmethod* find_and_remove_saved_code(methodOop m);
  static void remove_saved_code(nmethod* nm);
  static void speculatively_disconnect(nmethod* nm);

  // Deoptimization
  static int  mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
  static int  mark_for_evol_deoptimization(instanceKlassHandle dependee);
#endif // HOTSWAP

  static void mark_all_nmethods_for_deoptimization();
  static int  mark_for_deoptimization(methodOop dependee);
  static void make_marked_nmethods_zombies();
  static void make_marked_nmethods_not_entrant();

    // tells how many nmethods have dependencies
  static int number_of_nmethods_with_dependencies();
};
stefank@2314 191
stefank@2314 192 #endif // SHARE_VM_CODE_CODECACHE_HPP

mercurial