Mon, 07 Jul 2014 10:12:40 +0200
8049421: G1 Class Unloading after completing a concurrent mark cycle
Reviewed-by: tschatzl, ehelin, brutisso, coleenp, roland, iveresov
Contributed-by: stefan.karlsson@oracle.com, mikael.gerdin@oracle.com
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_CODECACHE_HPP
#define SHARE_VM_CODE_CODECACHE_HPP

#include "code/codeBlob.hpp"
#include "memory/allocation.hpp"
#include "memory/heap.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oopsHierarchy.hpp"

// The CodeCache implements the code cache for various pieces of generated
// code, e.g., compiled Java methods, runtime stubs, transition frames, etc.
// The entries in the CodeCache are all CodeBlobs.

// Implementation:
//   - Each CodeBlob occupies one chunk of memory.
//   - Like the offset table in oldspace, the code cache has a table for
//     locating a method given the address of an instruction.

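// Illustrative sketch (not part of this interface): the lookup table described
// above is what makes it possible to map an arbitrary instruction address back
// to its owning nmethod, e.g. from a profiler or stack walker. Assuming the
// caller already holds a valid pc, a hypothetical helper could look like:
//
//   static nmethod* nmethod_for_pc(address pc) {
//     CodeBlob* cb = CodeCache::find_blob(pc);  // NULL if pc is not in the code cache
//     return (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
//   }
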
class OopClosure;
class DepChange;

class CodeCache : AllStatic {
  friend class VMStructs;
 private:
  // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
  // so that the generated assembly code is always there when it's needed.
  // This may cause a memory leak, but is necessary, for now. See 4423824,
  // 4422213 or 4436291 for details.
  static CodeHeap* _heap;
  static int _number_of_blobs;
  static int _number_of_adapters;
  static int _number_of_nmethods;
  static int _number_of_nmethods_with_dependencies;
  static bool _needs_cache_clean;
  static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()

  static void verify_if_often() PRODUCT_RETURN;

  static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
  static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;

  static int _codemem_full_count;

 public:

  // Initialization
  static void initialize();

  static void report_codemem_full();

  // Allocation/administration
  static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
  static void commit(CodeBlob* cb);                    // called when the allocated CodeBlob has been filled
  static int  alignment_unit();                        // guaranteed alignment of all CodeBlobs
  static int  alignment_offset();                      // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
  static void free(CodeBlob* cb);                      // frees a CodeBlob
  static void flush();                                 // flushes all CodeBlobs
  static bool contains(void* p);                       // returns whether p is included
  static void blobs_do(void f(CodeBlob* cb));          // iterates over all CodeBlobs
  static void blobs_do(CodeBlobClosure* f);            // iterates over all CodeBlobs
  static void nmethods_do(void f(nmethod* nm));        // iterates over all nmethods
  static void alive_nmethods_do(void f(nmethod* nm));  // iterates over all alive nmethods

  // Lookup
  static CodeBlob* find_blob(void* start);
  static nmethod*  find_nmethod(void* start);

  // Lookup that does not fail if you look up a zombie method (if you call this,
  // be sure you know what you are doing).
  static CodeBlob* find_blob_unsafe(void* start) {
    // NMT can walk the stack before the code cache is created
    if (_heap == NULL) return NULL;

    CodeBlob* result = (CodeBlob*)_heap->find_start(start);
    // The assert below is too strong because the heap code will return the
    // heapblock containing start. That block can often be larger than the
    // CodeBlob itself. If you look up an address that is within the heapblock
    // but not in the CodeBlob you will assert.
    //
    // Most things will not look up such bad addresses. However,
    // AsyncGetCallTrace can see intermediate frames and get that kind of
    // invalid address, and so can a developer using hsfind.
    //
    // The more correct answer is to return NULL if blob_contains() returns
    // false.
    // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");

    if (result != NULL && !result->blob_contains((address)start)) {
      result = NULL;
    }
    return result;
  }

  // Iteration
  static CodeBlob* first();
  static CodeBlob* next(CodeBlob* cb);
  static CodeBlob* alive(CodeBlob* cb);
  static nmethod*  alive_nmethod(CodeBlob* cb);
  static nmethod*  first_nmethod();
  static nmethod*  next_nmethod(CodeBlob* cb);
  static int nof_blobs()    { return _number_of_blobs; }
  static int nof_adapters() { return _number_of_adapters; }
  static int nof_nmethods() { return _number_of_nmethods; }

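  // Illustrative sketch: walking every blob with the cursor-style iterators
  // above (this mirrors the pattern used internally by blobs_do()). Callers
  // typically hold the CodeCache_lock or run at a safepoint to keep the
  // iteration stable:
  //
  //   for (CodeBlob* cb = CodeCache::first(); cb != NULL; cb = CodeCache::next(cb)) {
  //     // inspect cb
  //   }
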
  // GC support
  static void gc_epilogue();
  static void gc_prologue();
  static void verify_oops();
  // If "unloading_occurred" is true, unloads (i.e., breaks the root links to)
  // any codeBlobs in the cache that are no longer reachable according to the
  // "is_alive" closure.
  static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
  static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
  static void scavenge_root_nmethods_do(CodeBlobClosure* f);

  static nmethod* scavenge_root_nmethods()             { return _scavenge_root_nmethods; }
  static void set_scavenge_root_nmethods(nmethod* nm)  { _scavenge_root_nmethods = nm; }
  static void add_scavenge_root_nmethod(nmethod* nm);
  static void drop_scavenge_root_nmethod(nmethod* nm);
  static void prune_scavenge_root_nmethods();
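
  // Illustrative sketch (hypothetical closure name): how a collector might
  // visit the nmethods on the scavenge-root list maintained above:
  //
  //   class ScanNMethodClosure : public CodeBlobClosure {
  //    public:
  //     virtual void do_code_blob(CodeBlob* cb) { /* process cb's oops */ }
  //   };
  //
  //   ScanNMethodClosure cl;
  //   CodeCache::scavenge_root_nmethods_do(&cl);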

  // Printing/debugging
  static void print();                           // prints summary
  static void print_internals();
  static void verify();                          // verifies the code cache
  static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
  static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage
  static void log_state(outputStream* st);

  // The full limits of the codeCache
  static address low_bound()  { return (address) _heap->low_boundary(); }
  static address high_bound() { return (address) _heap->high_boundary(); }
  static address high()       { return (address) _heap->high(); }

  // Profiling
  static address first_address();                // first address used for CodeBlobs
  static address last_address();                 // last address used for CodeBlobs
  static size_t  capacity()              { return _heap->capacity(); }
  static size_t  max_capacity()          { return _heap->max_capacity(); }
  static size_t  unallocated_capacity()  { return _heap->unallocated_capacity(); }
  static double  reverse_free_ratio();
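  // Illustrative sketch: computing the current occupancy of the cache from the
  // accessors above (assumes the cache has been initialized, so capacity() > 0):
  //
  //   size_t used       = CodeCache::capacity() - CodeCache::unallocated_capacity();
  //   double used_ratio = (double) used / (double) CodeCache::capacity();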

  static bool needs_cache_clean()            { return _needs_cache_clean; }
  static void set_needs_cache_clean(bool v)  { _needs_cache_clean = v; }
  static void clear_inline_caches();         // clear all inline caches

  static void verify_clean_inline_caches();
  static void verify_icholder_relocations();

  // Deoptimization
  static int  mark_for_deoptimization(DepChange& changes);
#ifdef HOTSWAP
  static int  mark_for_evol_deoptimization(instanceKlassHandle dependee);
#endif // HOTSWAP

  static void mark_all_nmethods_for_deoptimization();
  static int  mark_for_deoptimization(Method* dependee);
  static void make_marked_nmethods_zombies();
  static void make_marked_nmethods_not_entrant();
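
  // Illustrative sketch of the usual two-step invalidation pattern (a sketch
  // only: real callers run this inside a VM operation at a safepoint and also
  // deoptimize the affected frames, which is omitted here):
  //
  //   int marked = CodeCache::mark_for_deoptimization(dependee);  // dependee: a Method* being invalidated
  //   if (marked > 0) {
  //     CodeCache::make_marked_nmethods_not_entrant();
  //   }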

  // tells how many nmethods have dependencies
  static int number_of_nmethods_with_dependencies();

  static int get_codemem_full_count() { return _codemem_full_count; }
};

#endif // SHARE_VM_CODE_CODECACHE_HPP