Mon, 15 Apr 2019 16:27:50 +0000
8150013: ParNew: Prune nmethods scavengable list.
Summary: Speed up ParNew collections by pruning the list of scavengable nmethods.
Reviewed-by: jmasa, tonyp, twisti
duke@435 | 1 | /* |
cvarming@9661 | 2 | * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_CODE_CODECACHE_HPP |
stefank@2314 | 26 | #define SHARE_VM_CODE_CODECACHE_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "code/codeBlob.hpp" |
stefank@2314 | 29 | #include "memory/allocation.hpp" |
stefank@2314 | 30 | #include "memory/heap.hpp" |
stefank@2314 | 31 | #include "oops/instanceKlass.hpp" |
stefank@2314 | 32 | #include "oops/oopsHierarchy.hpp" |
stefank@2314 | 33 | |
duke@435 | 34 | // The CodeCache implements the code cache for various pieces of generated |
duke@435 | 35 | // code, e.g., compiled java methods, runtime stubs, transition frames, etc. |
duke@435 | 36 | // The entries in the CodeCache are all CodeBlob's. |
duke@435 | 37 | |
duke@435 | 38 | // Implementation: |
duke@435 | 39 | // - Each CodeBlob occupies one chunk of memory. |
duke@435 | 40 | // - Like the offset table in oldspace the zone has a table for |
duke@435 | 41 | // locating a method given an address of an instruction. |
duke@435 | 42 | |
duke@435 | 43 | class OopClosure; |
duke@435 | 44 | class DepChange; |
duke@435 | 45 | |
duke@435 | 46 | class CodeCache : AllStatic { |
duke@435 | 47 | friend class VMStructs; |
duke@435 | 48 | private: |
duke@435 | 49 | // CodeHeap is malloc()'ed at startup and never deleted during shutdown, |
duke@435 | 50 | // so that the generated assembly code is always there when it's needed. |
duke@435 | 51 | // This may cause memory leak, but is necessary, for now. See 4423824, |
duke@435 | 52 | // 4422213 or 4436291 for details. |
duke@435 | 53 | static CodeHeap * _heap; |
duke@435 | 54 | static int _number_of_blobs; |
never@1999 | 55 | static int _number_of_adapters; |
never@1999 | 56 | static int _number_of_nmethods; |
duke@435 | 57 | static int _number_of_nmethods_with_dependencies; |
duke@435 | 58 | static bool _needs_cache_clean; |
jrose@1424 | 59 | static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link() |
duke@435 | 60 | |
duke@435 | 61 | static void verify_if_often() PRODUCT_RETURN; // debug-only verification; no-op in product builds |
jrose@1424 | 62 | |
jrose@1424 | 63 | static void mark_scavenge_root_nmethods() PRODUCT_RETURN; |
jrose@1424 | 64 | static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN; |
jrose@1424 | 65 | |
sla@5237 | 66 | static int _codemem_full_count; |
sla@5237 | 67 | |
cvarming@9661 | 68 | static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; } // install new list head |
cvarming@9661 | 69 | static void prune_scavenge_root_nmethods(); // drop nmethods without scavengable oops from the list (see scavenge_root_nmethods_do below) |
cvarming@9661 | 70 | static void unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev); // unlink nm; prev is assumed to be its predecessor in the list |
cvarming@9661 | 71 | |
duke@435 | 72 | public: |
duke@435 | 73 | |
duke@435 | 74 | // Initialization |
duke@435 | 75 | static void initialize(); |
duke@435 | 76 | |
sla@5237 | 77 | static void report_codemem_full(); |
sla@5237 | 78 | |
duke@435 | 79 | // Allocation/administration |
neliasso@4952 | 80 | static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob |
duke@435 | 81 | static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled |
duke@435 | 82 | static int alignment_unit(); // guaranteed alignment of all CodeBlobs |
duke@435 | 83 | static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header) |
duke@435 | 84 | static void free(CodeBlob* cb); // frees a CodeBlob |
duke@435 | 85 | static void flush(); // flushes all CodeBlobs |
duke@435 | 86 | static bool contains(void *p); // returns whether p is included |
duke@435 | 87 | static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs |
jrose@1424 | 88 | static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs |
duke@435 | 89 | static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods |
coleenp@4037 | 90 | static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods |
duke@435 | 91 | |
duke@435 | 92 | // Lookup |
duke@435 | 93 | static CodeBlob* find_blob(void* start); |
duke@435 | 94 | static nmethod* find_nmethod(void* start); |
duke@435 | 95 | |
duke@435 | 96 | // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know |
duke@435 | 97 | // what you are doing) |
duke@435 | 98 | static CodeBlob* find_blob_unsafe(void* start) { |
zgu@3900 | 99 | // NMT can walk the stack before code cache is created |
zgu@3900 | 100 | if (_heap == NULL) return NULL; |
zgu@3900 | 101 | |
duke@435 | 102 | CodeBlob* result = (CodeBlob*)_heap->find_start(start); |
sgoldman@542 | 103 | // this assert is too strong because the heap code will return the |
sgoldman@542 | 104 | // heapblock containing start. That block can often be larger than |
sgoldman@542 | 105 | // the codeBlob itself. If you look up an address that is within |
sgoldman@542 | 106 | // the heapblock but not in the codeBlob you will assert. |
sgoldman@542 | 107 | // |
sgoldman@542 | 108 | // Most things will not lookup such bad addresses. However |
sgoldman@542 | 109 | // AsyncGetCallTrace can see intermediate frames and get that kind |
sgoldman@542 | 110 | // of invalid address and so can a developer using hsfind. |
sgoldman@542 | 111 | // |
sgoldman@542 | 112 | // The more correct answer is to return NULL if blob_contains() returns |
sgoldman@542 | 113 | // false. |
sgoldman@542 | 114 | // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob"); |
sgoldman@542 | 115 | |
sgoldman@542 | 116 | if (result != NULL && !result->blob_contains((address)start)) { |
sgoldman@542 | 117 | result = NULL; |
sgoldman@542 | 118 | } |
duke@435 | 119 | return result; |
duke@435 | 120 | } |
duke@435 | 121 | |
duke@435 | 122 | // Iteration |
duke@435 | 123 | static CodeBlob* first(); |
duke@435 | 124 | static CodeBlob* next (CodeBlob* cb); |
duke@435 | 125 | static CodeBlob* alive(CodeBlob *cb); |
duke@435 | 126 | static nmethod* alive_nmethod(CodeBlob *cb); |
never@1893 | 127 | static nmethod* first_nmethod(); |
never@1893 | 128 | static nmethod* next_nmethod (CodeBlob* cb); |
duke@435 | 129 | static int nof_blobs() { return _number_of_blobs; } |
never@1999 | 130 | static int nof_adapters() { return _number_of_adapters; } |
never@1999 | 131 | static int nof_nmethods() { return _number_of_nmethods; } |
duke@435 | 132 | |
duke@435 | 133 | // GC support |
duke@435 | 134 | static void gc_epilogue(); |
duke@435 | 135 | static void gc_prologue(); |
never@2657 | 136 | static void verify_oops(); |
duke@435 | 137 | // If "unloading_occurred" is true, then unloads (i.e., breaks root links |
duke@435 | 138 | // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading" |
duke@435 | 139 | // to "true" iff some code got unloaded. |
brutisso@4098 | 140 | static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred); |
jrose@1424 | 141 | static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN; |
cvarming@9661 | 142 | |
cvarming@9661 | 143 | // Apply f to every live code blob in scavengable nmethods. Prune nmethods |
cvarming@9661 | 144 | // from the list of scavengable nmethods if f->fix_relocations() and a nmethod |
cvarming@9661 | 145 | // no longer has scavengable oops. If f->fix_relocations(), then f must copy |
cvarming@9661 | 146 | // objects to their new location immediately to avoid fixing nmethods on the |
cvarming@9661 | 147 | // basis of the old object locations. |
cvarming@9661 | 148 | static void scavenge_root_nmethods_do(CodeBlobToOopClosure* f); |
jrose@1424 | 149 | |
jrose@1424 | 150 | static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; } |
jrose@1424 | 151 | static void add_scavenge_root_nmethod(nmethod* nm); |
jrose@1424 | 152 | static void drop_scavenge_root_nmethod(nmethod* nm); |
duke@435 | 153 | |
duke@435 | 154 | // Printing/debugging |
vladidan@4438 | 155 | static void print(); // prints summary |
duke@435 | 156 | static void print_internals(); |
duke@435 | 157 | static void verify(); // verifies the code cache |
jrose@1424 | 158 | static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN; |
vladidan@4438 | 159 | static void print_summary(outputStream* st, bool detailed = true); // Prints a summary of the code cache usage |
kvn@2635 | 160 | static void log_state(outputStream* st); |
duke@435 | 161 | |
duke@435 | 162 | // The full limits of the codeCache |
duke@435 | 163 | static address low_bound() { return (address) _heap->low_boundary(); } |
duke@435 | 164 | static address high_bound() { return (address) _heap->high_boundary(); } |
sla@5237 | 165 | static address high() { return (address) _heap->high(); } |
duke@435 | 166 | |
duke@435 | 167 | // Profiling |
duke@435 | 168 | static address first_address(); // first address used for CodeBlobs |
duke@435 | 169 | static address last_address(); // last address used for CodeBlobs |
duke@435 | 170 | static size_t capacity() { return _heap->capacity(); } |
duke@435 | 171 | static size_t max_capacity() { return _heap->max_capacity(); } |
duke@435 | 172 | static size_t unallocated_capacity() { return _heap->unallocated_capacity(); } |
anoll@5151 | 173 | static double reverse_free_ratio(); |
duke@435 | 174 | |
duke@435 | 175 | static bool needs_cache_clean() { return _needs_cache_clean; } |
duke@435 | 176 | static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; } |
duke@435 | 177 | static void clear_inline_caches(); // clear all inline caches |
duke@435 | 178 | |
stefank@6992 | 179 | static void verify_clean_inline_caches(); |
stefank@6992 | 180 | static void verify_icholder_relocations(); |
stefank@6992 | 181 | |
duke@435 | 182 | // Deoptimization |
duke@435 | 183 | static int mark_for_deoptimization(DepChange& changes); |
duke@435 | 184 | #ifdef HOTSWAP |
duke@435 | 185 | static int mark_for_evol_deoptimization(instanceKlassHandle dependee); |
duke@435 | 186 | #endif // HOTSWAP |
duke@435 | 187 | |
duke@435 | 188 | static void mark_all_nmethods_for_deoptimization(); |
coleenp@4037 | 189 | static int mark_for_deoptimization(Method* dependee); |
duke@435 | 190 | static void make_marked_nmethods_not_entrant(); |
duke@435 | 191 | |
duke@435 | 192 | // tells how many nmethods have dependencies |
duke@435 | 193 | static int number_of_nmethods_with_dependencies(); |
sla@5237 | 194 | |
sla@5237 | 195 | static int get_codemem_full_count() { return _codemem_full_count; } |
duke@435 | 196 | }; |
stefank@2314 | 197 | |
stefank@2314 | 198 | #endif // SHARE_VM_CODE_CODECACHE_HPP |