Wed, 02 Jun 2010 22:45:42 -0700
Merge
1 /*
2 * Copyright (c) 1997, 2008, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 // The CodeCache implements the code cache for various pieces of generated
26 // code, e.g., compiled java methods, runtime stubs, transition frames, etc.
27 // The entries in the CodeCache are all CodeBlob's.
29 // Implementation:
30 // - Each CodeBlob occupies one chunk of memory.
31 // - Like the offset table in oldspace, the zone has a table for
32 // locating a method given an address of an instruction.
// Forward declarations; full definitions live in other headers.
34 class OopClosure;
35 class DepChange;
// All-static holder for the JVM's cache of generated code (CodeBlobs).
// State is global: a single CodeHeap plus bookkeeping counters and lists.
37 class CodeCache : AllStatic {
38 friend class VMStructs;
39 private:
40 // CodeHeap is malloc()'ed at startup and never deleted during shutdown,
41 // so that the generated assembly code is always there when it's needed.
42 // This may cause memory leak, but is necessary, for now. See 4423824,
43 // 4422213 or 4436291 for details.
44 static CodeHeap * _heap;
45 static int _number_of_blobs;
46 static int _number_of_nmethods_with_dependencies;
47 static bool _needs_cache_clean;
48 static nmethod* _scavenge_root_nmethods; // linked via nm->scavenge_root_link()
49 static nmethod* _saved_nmethods; // linked via nm->saved_nmethod_link()
// Debug-only checks; PRODUCT_RETURN makes these empty no-ops in product builds.
51 static void verify_if_often() PRODUCT_RETURN;
53 static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
54 static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
56 public:
58 // Initialization
59 static void initialize();
61 // Allocation/administration
62 static CodeBlob* allocate(int size); // allocates a new CodeBlob
63 static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled
64 static int alignment_unit(); // guaranteed alignment of all CodeBlobs
65 static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
66 static void free(CodeBlob* cb); // frees a CodeBlob
67 static void flush(); // flushes all CodeBlobs
68 static bool contains(void *p); // returns whether p is included
69 static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
70 static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
71 static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
73 // Lookup
74 static CodeBlob* find_blob(void* start);
75 static nmethod* find_nmethod(void* start);
77 // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know
78 // what you are doing)
79 static CodeBlob* find_blob_unsafe(void* start) {
80 CodeBlob* result = (CodeBlob*)_heap->find_start(start);
81 // this assert is too strong because the heap code will return the
82 // heapblock containing start. That block can often be larger than
83 // the codeBlob itself. If you look up an address that is within
84 // the heapblock but not in the codeBlob you will assert.
85 //
86 // Most things will not lookup such bad addresses. However
87 // AsyncGetCallTrace can see intermediate frames and get that kind
88 // of invalid address and so can a developer using hsfind.
89 //
90 // The more correct answer is to return NULL if blob_contains() returns
91 // false.
92 // assert(result == NULL || result->blob_contains((address)start), "found wrong CodeBlob");
// Per the note above: answer NULL rather than returning a CodeBlob that
// does not actually cover 'start'.
94 if (result != NULL && !result->blob_contains((address)start)) {
95 result = NULL;
96 }
97 return result;
98 }
100 // Iteration
101 static CodeBlob* first();
102 static CodeBlob* next (CodeBlob* cb);
103 static CodeBlob* alive(CodeBlob *cb);
104 static nmethod* alive_nmethod(CodeBlob *cb);
105 static nmethod* first_nmethod();
106 static nmethod* next_nmethod (CodeBlob* cb);
107 static int nof_blobs() { return _number_of_blobs; }
109 // GC support
110 static void gc_epilogue();
111 static void gc_prologue();
112 // If "unloading_occurred" is true, then unloads (i.e., breaks root links
113 // to) any unmarked codeBlobs in the cache. Sets "marked_for_unloading"
114 // to "true" iff some code got unloaded.
115 static void do_unloading(BoolObjectClosure* is_alive,
116 OopClosure* keep_alive,
117 bool unloading_occurred);
// Applies f to the oops of every CodeBlob, without marking (do_marking=false).
118 static void oops_do(OopClosure* f) {
119 CodeBlobToOopClosure oopc(f, /*do_marking=*/ false);
120 blobs_do(&oopc);
121 }
122 static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
123 static void scavenge_root_nmethods_do(CodeBlobClosure* f);
// Maintenance of the _scavenge_root_nmethods list (linked via
// nm->scavenge_root_link(); presumably nmethods holding scavengable oops —
// confirm against nmethod.hpp).
125 static nmethod* scavenge_root_nmethods() { return _scavenge_root_nmethods; }
126 static void set_scavenge_root_nmethods(nmethod* nm) { _scavenge_root_nmethods = nm; }
127 static void add_scavenge_root_nmethod(nmethod* nm);
128 static void drop_scavenge_root_nmethod(nmethod* nm);
129 static void prune_scavenge_root_nmethods();
131 // Printing/debugging
132 static void print() PRODUCT_RETURN; // prints summary
133 static void print_internals();
134 static void verify(); // verifies the code cache
135 static void print_trace(const char* event, CodeBlob* cb, int size = 0) PRODUCT_RETURN;
137 // The full limits of the codeCache
138 static address low_bound() { return (address) _heap->low_boundary(); }
139 static address high_bound() { return (address) _heap->high_boundary(); }
141 // Profiling
142 static address first_address(); // first address used for CodeBlobs
143 static address last_address(); // last address used for CodeBlobs
// Size queries delegate directly to the underlying CodeHeap.
144 static size_t capacity() { return _heap->capacity(); }
145 static size_t max_capacity() { return _heap->max_capacity(); }
146 static size_t unallocated_capacity() { return _heap->unallocated_capacity(); }
147 static bool needs_flushing() { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
149 static bool needs_cache_clean() { return _needs_cache_clean; }
150 static void set_needs_cache_clean(bool v) { _needs_cache_clean = v; }
151 static void clear_inline_caches(); // clear all inline caches
// Management of the _saved_nmethods list used by speculative disconnection.
153 static nmethod* find_and_remove_saved_code(methodOop m);
154 static void remove_saved_code(nmethod* nm);
155 static void speculatively_disconnect(nmethod* nm);
157 // Deoptimization
158 static int mark_for_deoptimization(DepChange& changes);
159 #ifdef HOTSWAP
160 static int mark_for_evol_deoptimization(instanceKlassHandle dependee);
161 #endif // HOTSWAP
163 static void mark_all_nmethods_for_deoptimization();
164 static int mark_for_deoptimization(methodOop dependee);
165 static void make_marked_nmethods_zombies();
166 static void make_marked_nmethods_not_entrant();
168 // tells how many nmethods have dependencies
169 static int number_of_nmethods_with_dependencies();
170 };