Mon, 10 Jun 2013 11:30:51 +0200
8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>
1 /*
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "code/codeBlob.hpp"
27 #include "code/codeCache.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/dependencies.hpp"
30 #include "code/icBuffer.hpp"
31 #include "code/nmethod.hpp"
32 #include "code/pcDesc.hpp"
33 #include "compiler/compileBroker.hpp"
34 #include "gc_implementation/shared/markSweep.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "memory/gcLocker.hpp"
37 #include "memory/iterator.hpp"
38 #include "memory/resourceArea.hpp"
39 #include "oops/method.hpp"
40 #include "oops/objArrayOop.hpp"
41 #include "oops/oop.inline.hpp"
42 #include "runtime/handles.inline.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/icache.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/mutexLocker.hpp"
47 #include "services/memoryService.hpp"
48 #include "trace/tracing.hpp"
49 #include "utilities/xmlstream.hpp"
51 // Helper class for printing in CodeCache
53 class CodeBlob_sizes {
54 private:
55 int count;
56 int total_size;
57 int header_size;
58 int code_size;
59 int stub_size;
60 int relocation_size;
61 int scopes_oop_size;
62 int scopes_metadata_size;
63 int scopes_data_size;
64 int scopes_pcs_size;
66 public:
67 CodeBlob_sizes() {
68 count = 0;
69 total_size = 0;
70 header_size = 0;
71 code_size = 0;
72 stub_size = 0;
73 relocation_size = 0;
74 scopes_oop_size = 0;
75 scopes_metadata_size = 0;
76 scopes_data_size = 0;
77 scopes_pcs_size = 0;
78 }
80 int total() { return total_size; }
81 bool is_empty() { return count == 0; }
83 void print(const char* title) {
84 tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
85 count,
86 title,
87 total() / K,
88 header_size * 100 / total_size,
89 relocation_size * 100 / total_size,
90 code_size * 100 / total_size,
91 stub_size * 100 / total_size,
92 scopes_oop_size * 100 / total_size,
93 scopes_metadata_size * 100 / total_size,
94 scopes_data_size * 100 / total_size,
95 scopes_pcs_size * 100 / total_size);
96 }
98 void add(CodeBlob* cb) {
99 count++;
100 total_size += cb->size();
101 header_size += cb->header_size();
102 relocation_size += cb->relocation_size();
103 if (cb->is_nmethod()) {
104 nmethod* nm = cb->as_nmethod_or_null();
105 code_size += nm->insts_size();
106 stub_size += nm->stub_size();
108 scopes_oop_size += nm->oops_size();
109 scopes_metadata_size += nm->metadata_size();
110 scopes_data_size += nm->scopes_data_size();
111 scopes_pcs_size += nm->scopes_pcs_size();
112 } else {
113 code_size += cb->code_size();
114 }
115 }
116 };
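// Illustrative output of CodeBlob_sizes::print() (the numbers below are made
// up; only the layout follows the format string above, using the "live" title
// passed in by CodeCache::print()):
//   #1234 live = 20480K (hdr 2%, loc 5%, code 60%, stub 8%, [oops 1%, metadata 2%, data 12%, pcs 10%])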
118 // CodeCache implementation
120 CodeHeap * CodeCache::_heap = new CodeHeap();
121 int CodeCache::_number_of_blobs = 0;
122 int CodeCache::_number_of_adapters = 0;
123 int CodeCache::_number_of_nmethods = 0;
124 int CodeCache::_number_of_nmethods_with_dependencies = 0;
125 bool CodeCache::_needs_cache_clean = false;
126 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
127 nmethod* CodeCache::_saved_nmethods = NULL;
129 int CodeCache::_codemem_full_count = 0;
131 CodeBlob* CodeCache::first() {
132 assert_locked_or_safepoint(CodeCache_lock);
133 return (CodeBlob*)_heap->first();
134 }
137 CodeBlob* CodeCache::next(CodeBlob* cb) {
138 assert_locked_or_safepoint(CodeCache_lock);
139 return (CodeBlob*)_heap->next(cb);
140 }
143 CodeBlob* CodeCache::alive(CodeBlob *cb) {
144 assert_locked_or_safepoint(CodeCache_lock);
145 while (cb != NULL && !cb->is_alive()) cb = next(cb);
146 return cb;
147 }
150 nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
151 assert_locked_or_safepoint(CodeCache_lock);
152 while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
153 return (nmethod*)cb;
154 }
156 nmethod* CodeCache::first_nmethod() {
157 assert_locked_or_safepoint(CodeCache_lock);
158 CodeBlob* cb = first();
159 while (cb != NULL && !cb->is_nmethod()) {
160 cb = next(cb);
161 }
162 return (nmethod*)cb;
163 }
165 nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
166 assert_locked_or_safepoint(CodeCache_lock);
167 cb = next(cb);
168 while (cb != NULL && !cb->is_nmethod()) {
169 cb = next(cb);
170 }
171 return (nmethod*)cb;
172 }
174 static size_t maxCodeCacheUsed = 0;
176 CodeBlob* CodeCache::allocate(int size, bool is_critical) {
177 // Do not seize the CodeCache lock here -- if the caller has not
178 // already done so, we would be in serious trouble, since the code
179 // cache would contain a garbage CodeBlob until the caller has
180 // run the constructor for the CodeBlob subclass it is
181 // instantiating.
182 guarantee(size >= 0, "allocation request must be reasonable");
183 assert_locked_or_safepoint(CodeCache_lock);
184 CodeBlob* cb = NULL;
185 _number_of_blobs++;
186 while (true) {
187 cb = (CodeBlob*)_heap->allocate(size, is_critical);
188 if (cb != NULL) break;
189 if (!_heap->expand_by(CodeCacheExpansionSize)) {
190 // Expansion failed
191 return NULL;
192 }
193 if (PrintCodeCacheExtension) {
194 ResourceMark rm;
195 tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
196 (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
197 (address)_heap->high() - (address)_heap->low_boundary());
198 }
199 }
200 maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
201 (address)_heap->low_boundary()) - unallocated_capacity());
202 verify_if_often();
203 print_trace("allocation", cb, size);
204 return cb;
205 }
207 void CodeCache::free(CodeBlob* cb) {
208 assert_locked_or_safepoint(CodeCache_lock);
209 verify_if_often();
211 print_trace("free", cb);
212 if (cb->is_nmethod()) {
213 _number_of_nmethods--;
214 if (((nmethod *)cb)->has_dependencies()) {
215 _number_of_nmethods_with_dependencies--;
216 }
217 }
218 if (cb->is_adapter_blob()) {
219 _number_of_adapters--;
220 }
221 _number_of_blobs--;
223 _heap->deallocate(cb);
225 verify_if_often();
226 assert(_number_of_blobs >= 0, "sanity check");
227 }
230 void CodeCache::commit(CodeBlob* cb) {
231 // this is called by nmethod::nmethod, which must already own CodeCache_lock
232 assert_locked_or_safepoint(CodeCache_lock);
233 if (cb->is_nmethod()) {
234 _number_of_nmethods++;
235 if (((nmethod *)cb)->has_dependencies()) {
236 _number_of_nmethods_with_dependencies++;
237 }
238 }
239 if (cb->is_adapter_blob()) {
240 _number_of_adapters++;
241 }
243 // flush the hardware I-cache
244 ICache::invalidate_range(cb->content_begin(), cb->content_size());
245 }
248 void CodeCache::flush() {
249 assert_locked_or_safepoint(CodeCache_lock);
250 Unimplemented();
251 }
254 // Iteration over CodeBlobs
256 #define FOR_ALL_BLOBS(var) for (CodeBlob *var = first() ; var != NULL; var = next(var) )
257 #define FOR_ALL_ALIVE_BLOBS(var) for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
258 #define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
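// The three macros above iterate over, respectively, all blobs, all live
// blobs, and all live nmethods in the code cache. They are built on
// first()/next()/alive()/alive_nmethod(), so users are expected to hold the
// CodeCache_lock or be at a safepoint (most callers below assert this
// explicitly).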
261 bool CodeCache::contains(void *p) {
262 // It should be ok to call contains without holding a lock
263 return _heap->contains(p);
264 }
267 // This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
268 // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap containing
269 // valid indices, which it always does, as long as the CodeBlob is not in the process of being recycled.
270 CodeBlob* CodeCache::find_blob(void* start) {
271 CodeBlob* result = find_blob_unsafe(start);
272 if (result == NULL) return NULL;
273 // We could potentially look up non_entrant methods
274 guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
275 return result;
276 }
278 nmethod* CodeCache::find_nmethod(void* start) {
279 CodeBlob *cb = find_blob(start);
280 assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
281 return (nmethod*)cb;
282 }
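// Illustrative (hypothetical) use of the lock-free lookup above, e.g. when
// mapping an instruction address back to its code blob while walking a stack
// (pc here is an assumed address taken from a frame, not a name used in this
// file):
//
//   CodeBlob* cb = CodeCache::find_blob(pc);
//   if (cb != NULL && cb->is_nmethod()) {
//     nmethod* nm = (nmethod*)cb;
//     // ... inspect nm ...
//   }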
285 void CodeCache::blobs_do(void f(CodeBlob* nm)) {
286 assert_locked_or_safepoint(CodeCache_lock);
287 FOR_ALL_BLOBS(p) {
288 f(p);
289 }
290 }
293 void CodeCache::nmethods_do(void f(nmethod* nm)) {
294 assert_locked_or_safepoint(CodeCache_lock);
295 FOR_ALL_BLOBS(nm) {
296 if (nm->is_nmethod()) f((nmethod*)nm);
297 }
298 }
300 void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
301 assert_locked_or_safepoint(CodeCache_lock);
302 FOR_ALL_ALIVE_NMETHODS(nm) {
303 f(nm);
304 }
305 }
307 int CodeCache::alignment_unit() {
308 return (int)_heap->alignment_unit();
309 }
312 int CodeCache::alignment_offset() {
313 return (int)_heap->alignment_offset();
314 }
317 // Mark nmethods for unloading if they contain otherwise unreachable
318 // oops.
319 void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
320 assert_locked_or_safepoint(CodeCache_lock);
321 FOR_ALL_ALIVE_NMETHODS(nm) {
322 nm->do_unloading(is_alive, unloading_occurred);
323 }
324 }
326 void CodeCache::blobs_do(CodeBlobClosure* f) {
327 assert_locked_or_safepoint(CodeCache_lock);
328 FOR_ALL_ALIVE_BLOBS(cb) {
329 f->do_code_blob(cb);
331 #ifdef ASSERT
332 if (cb->is_nmethod())
333 ((nmethod*)cb)->verify_scavenge_root_oops();
334 #endif //ASSERT
335 }
336 }
338 // Walk the list of methods which might contain non-perm oops.
339 void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
340 assert_locked_or_safepoint(CodeCache_lock);
341 debug_only(mark_scavenge_root_nmethods());
343 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
344 debug_only(cur->clear_scavenge_root_marked());
345 assert(cur->scavenge_root_not_marked(), "");
346 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
348 bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
349 #ifndef PRODUCT
350 if (TraceScavenge) {
351 cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
352 }
353 #endif //PRODUCT
354 if (is_live) {
355 // Perform cur->oops_do(f), maybe just once per nmethod.
356 f->do_code_blob(cur);
357 }
358 }
360 // Check for stray marks.
361 debug_only(verify_perm_nmethods(NULL));
362 }
364 void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
365 assert_locked_or_safepoint(CodeCache_lock);
366 nm->set_on_scavenge_root_list();
367 nm->set_scavenge_root_link(_scavenge_root_nmethods);
368 set_scavenge_root_nmethods(nm);
369 print_trace("add_scavenge_root", nm);
370 }
372 void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
373 assert_locked_or_safepoint(CodeCache_lock);
374 print_trace("drop_scavenge_root", nm);
375 nmethod* last = NULL;
376 nmethod* cur = scavenge_root_nmethods();
377 while (cur != NULL) {
378 nmethod* next = cur->scavenge_root_link();
379 if (cur == nm) {
380 if (last != NULL)
381 last->set_scavenge_root_link(next);
382 else set_scavenge_root_nmethods(next);
383 nm->set_scavenge_root_link(NULL);
384 nm->clear_on_scavenge_root_list();
385 return;
386 }
387 last = cur;
388 cur = next;
389 }
390 assert(false, "should have been on list");
391 }
393 void CodeCache::prune_scavenge_root_nmethods() {
394 assert_locked_or_safepoint(CodeCache_lock);
395 debug_only(mark_scavenge_root_nmethods());
397 nmethod* last = NULL;
398 nmethod* cur = scavenge_root_nmethods();
399 while (cur != NULL) {
400 nmethod* next = cur->scavenge_root_link();
401 debug_only(cur->clear_scavenge_root_marked());
402 assert(cur->scavenge_root_not_marked(), "");
403 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
405 if (!cur->is_zombie() && !cur->is_unloaded()
406 && cur->detect_scavenge_root_oops()) {
407 // Keep it. Advance 'last' to prevent deletion.
408 last = cur;
409 } else {
410 // Prune it from the list, so we don't have to look at it any more.
411 print_trace("prune_scavenge_root", cur);
412 cur->set_scavenge_root_link(NULL);
413 cur->clear_on_scavenge_root_list();
414 if (last != NULL)
415 last->set_scavenge_root_link(next);
416 else set_scavenge_root_nmethods(next);
417 }
418 cur = next;
419 }
421 // Check for stray marks.
422 debug_only(verify_perm_nmethods(NULL));
423 }
425 #ifndef PRODUCT
426 void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
427 // While we are here, verify the integrity of the list.
428 mark_scavenge_root_nmethods();
429 for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
430 assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
431 cur->clear_scavenge_root_marked();
432 }
433 verify_perm_nmethods(f);
434 }
436 // Temporarily mark nmethods that are claimed to be on the non-perm list.
437 void CodeCache::mark_scavenge_root_nmethods() {
438 FOR_ALL_ALIVE_BLOBS(cb) {
439 if (cb->is_nmethod()) {
440 nmethod *nm = (nmethod*)cb;
441 assert(nm->scavenge_root_not_marked(), "clean state");
442 if (nm->on_scavenge_root_list())
443 nm->set_scavenge_root_marked();
444 }
445 }
446 }
448 // If the closure is given, run it on the unlisted nmethods.
449 // Also make sure that the effects of mark_scavenge_root_nmethods are gone.
450 void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
451 FOR_ALL_ALIVE_BLOBS(cb) {
452 bool call_f = (f_or_null != NULL);
453 if (cb->is_nmethod()) {
454 nmethod *nm = (nmethod*)cb;
455 assert(nm->scavenge_root_not_marked(), "must be already processed");
456 if (nm->on_scavenge_root_list())
457 call_f = false; // don't show this one to the client
458 nm->verify_scavenge_root_oops();
459 } else {
460 call_f = false; // not an nmethod
461 }
462 if (call_f) f_or_null->do_code_blob(cb);
463 }
464 }
465 #endif //PRODUCT
467 /**
468 * Remove and return an nmethod from the saved code list in order to reanimate it.
469 */
470 nmethod* CodeCache::reanimate_saved_code(Method* m) {
471 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
472 nmethod* saved = _saved_nmethods;
473 nmethod* prev = NULL;
474 while (saved != NULL) {
475 if (saved->is_in_use() && saved->method() == m) {
476 if (prev != NULL) {
477 prev->set_saved_nmethod_link(saved->saved_nmethod_link());
478 } else {
479 _saved_nmethods = saved->saved_nmethod_link();
480 }
481 assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
482 saved->set_speculatively_disconnected(false);
483 saved->set_saved_nmethod_link(NULL);
484 if (PrintMethodFlushing) {
485 saved->print_on(tty, " ### nmethod is reconnected");
486 }
487 if (LogCompilation && (xtty != NULL)) {
488 ttyLocker ttyl;
489 xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
490 xtty->method(m);
491 xtty->stamp();
492 xtty->end_elem();
493 }
494 return saved;
495 }
496 prev = saved;
497 saved = saved->saved_nmethod_link();
498 }
499 return NULL;
500 }
502 /**
503 * Remove an nmethod from the saved code list in order to discard it permanently.
504 */
505 void CodeCache::remove_saved_code(nmethod* nm) {
506 // For the concurrent sweeper this is called with the CodeCache_lock already taken by the caller
507 assert_locked_or_safepoint(CodeCache_lock);
508 assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
509 nmethod* saved = _saved_nmethods;
510 nmethod* prev = NULL;
511 while (saved != NULL) {
512 if (saved == nm) {
513 if (prev != NULL) {
514 prev->set_saved_nmethod_link(saved->saved_nmethod_link());
515 } else {
516 _saved_nmethods = saved->saved_nmethod_link();
517 }
518 if (LogCompilation && (xtty != NULL)) {
519 ttyLocker ttyl;
520 xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
521 xtty->stamp();
522 xtty->end_elem();
523 }
524 return;
525 }
526 prev = saved;
527 saved = saved->saved_nmethod_link();
528 }
529 ShouldNotReachHere();
530 }
532 void CodeCache::speculatively_disconnect(nmethod* nm) {
533 assert_locked_or_safepoint(CodeCache_lock);
534 assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
535 nm->set_saved_nmethod_link(_saved_nmethods);
536 _saved_nmethods = nm;
537 if (PrintMethodFlushing) {
538 nm->print_on(tty, " ### nmethod is speculatively disconnected");
539 }
540 if (LogCompilation && (xtty != NULL)) {
541 ttyLocker ttyl;
542 xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
543 xtty->method(nm->method());
544 xtty->stamp();
545 xtty->end_elem();
546 }
547 nm->method()->clear_code();
548 nm->set_speculatively_disconnected(true);
549 }
552 void CodeCache::gc_prologue() {
553 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
554 }
557 void CodeCache::gc_epilogue() {
558 assert_locked_or_safepoint(CodeCache_lock);
559 FOR_ALL_ALIVE_BLOBS(cb) {
560 if (cb->is_nmethod()) {
561 nmethod *nm = (nmethod*)cb;
562 assert(!nm->is_unloaded(), "Tautology");
563 if (needs_cache_clean()) {
564 nm->cleanup_inline_caches();
565 }
566 DEBUG_ONLY(nm->verify());
567 nm->fix_oop_relocations();
568 }
569 }
570 set_needs_cache_clean(false);
571 prune_scavenge_root_nmethods();
572 assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
574 #ifdef ASSERT
575 // make sure that we aren't leaking icholders
576 int count = 0;
577 FOR_ALL_BLOBS(cb) {
578 if (cb->is_nmethod()) {
579 RelocIterator iter((nmethod*)cb);
580 while(iter.next()) {
581 if (iter.type() == relocInfo::virtual_call_type) {
582 if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
583 CompiledIC *ic = CompiledIC_at(iter.reloc());
584 if (TraceCompiledIC) {
585 tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
586 ic->print();
587 }
588 assert(ic->cached_icholder() != NULL, "must be non-NULL");
589 count++;
590 }
591 }
592 }
593 }
594 }
596 assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
597 CompiledICHolder::live_count(), "must agree");
598 #endif
599 }
602 void CodeCache::verify_oops() {
603 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
604 VerifyOopClosure voc;
605 FOR_ALL_ALIVE_BLOBS(cb) {
606 if (cb->is_nmethod()) {
607 nmethod *nm = (nmethod*)cb;
608 nm->oops_do(&voc);
609 nm->verify_oop_relocations();
610 }
611 }
612 }
615 address CodeCache::first_address() {
616 assert_locked_or_safepoint(CodeCache_lock);
617 return (address)_heap->low_boundary();
618 }
621 address CodeCache::last_address() {
622 assert_locked_or_safepoint(CodeCache_lock);
623 return (address)_heap->high();
624 }
626 /**
627 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
628 * is free, reverse_free_ratio() returns 4.
629 */
630 double CodeCache::reverse_free_ratio() {
631 double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
632 double max_capacity = (double)CodeCache::max_capacity();
633 return max_capacity / unallocated_capacity;
634 }
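// Worked example (all values below are illustrative, not defaults): with
// max_capacity() == 48M, unallocated_capacity() == 12M and
// CodeCacheMinimumFreeSpace == 0.5M, reverse_free_ratio() == 48M / 11.5M,
// roughly 4.2 -- slightly above the 4 suggested by the comment above because
// of the CodeCacheMinimumFreeSpace adjustment. The value grows as the code
// cache fills, so callers can scale their heuristics against it.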
636 void icache_init();
638 void CodeCache::initialize() {
639 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
640 #ifdef COMPILER2
641 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops");
642 #endif
643 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
644 // This was originally just a check of the alignment, which caused failures; instead, round
645 // the code cache sizes up to the page size. In particular, Solaris is moving to a larger
646 // default page size.
647 CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
648 InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
649 ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
650 if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
651 vm_exit_during_initialization("Could not reserve enough space for code cache");
652 }
654 MemoryService::add_code_heap_memory_pool(_heap);
656 // Initialize ICache flush mechanism
657 // This service is needed for os::register_code_area
658 icache_init();
660 // Give OS a chance to register generated code area.
661 // This is used on Windows 64 bit platforms to register
662 // Structured Exception Handlers for our generated code.
663 os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
664 }
667 void codeCache_init() {
668 CodeCache::initialize();
669 }
671 //------------------------------------------------------------------------------------------------
673 int CodeCache::number_of_nmethods_with_dependencies() {
674 return _number_of_nmethods_with_dependencies;
675 }
677 void CodeCache::clear_inline_caches() {
678 assert_locked_or_safepoint(CodeCache_lock);
679 FOR_ALL_ALIVE_NMETHODS(nm) {
680 nm->clear_inline_caches();
681 }
682 }
684 #ifndef PRODUCT
685 // used to keep track of how much time is spent in mark_for_deoptimization
686 static elapsedTimer dependentCheckTime;
687 static int dependentCheckCount = 0;
688 #endif // PRODUCT
691 int CodeCache::mark_for_deoptimization(DepChange& changes) {
692 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
694 #ifndef PRODUCT
695 dependentCheckTime.start();
696 dependentCheckCount++;
697 #endif // PRODUCT
699 int number_of_marked_CodeBlobs = 0;
701 // Search the hierarchy looking for nmethods which are affected by the loading of this class,
703 // then search the interfaces this class implements looking for nmethods
704 // which might be dependent on the fact that an interface previously had only one
705 // implementor.
707 { No_Safepoint_Verifier nsv;
708 for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
709 Klass* d = str.klass();
710 number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
711 }
712 }
714 if (VerifyDependencies) {
715 // Turn off dependency tracing while actually testing deps.
716 NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
717 FOR_ALL_ALIVE_NMETHODS(nm) {
718 if (!nm->is_marked_for_deoptimization() &&
719 nm->check_all_dependencies()) {
720 ResourceMark rm;
721 tty->print_cr("Should have been marked for deoptimization:");
722 changes.print();
723 nm->print();
724 nm->print_dependencies();
725 }
726 }
727 }
729 #ifndef PRODUCT
730 dependentCheckTime.stop();
731 #endif // PRODUCT
733 return number_of_marked_CodeBlobs;
734 }
737 #ifdef HOTSWAP
738 int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
739 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
740 int number_of_marked_CodeBlobs = 0;
742 // Deoptimize all methods of the evolving class itself
743 Array<Method*>* old_methods = dependee->methods();
744 for (int i = 0; i < old_methods->length(); i++) {
745 ResourceMark rm;
746 Method* old_method = old_methods->at(i);
747 nmethod *nm = old_method->code();
748 if (nm != NULL) {
749 nm->mark_for_deoptimization();
750 number_of_marked_CodeBlobs++;
751 }
752 }
754 FOR_ALL_ALIVE_NMETHODS(nm) {
755 if (nm->is_marked_for_deoptimization()) {
756 // ...Already marked in the previous pass; don't count it again.
757 } else if (nm->is_evol_dependent_on(dependee())) {
758 ResourceMark rm;
759 nm->mark_for_deoptimization();
760 number_of_marked_CodeBlobs++;
761 } else {
762 // flush caches in case they refer to a redefined Method*
763 nm->clear_inline_caches();
764 }
765 }
767 return number_of_marked_CodeBlobs;
768 }
769 #endif // HOTSWAP
772 // Deoptimize all methods
773 void CodeCache::mark_all_nmethods_for_deoptimization() {
774 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
775 FOR_ALL_ALIVE_NMETHODS(nm) {
776 nm->mark_for_deoptimization();
777 }
778 }
781 int CodeCache::mark_for_deoptimization(Method* dependee) {
782 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
783 int number_of_marked_CodeBlobs = 0;
785 FOR_ALL_ALIVE_NMETHODS(nm) {
786 if (nm->is_dependent_on_method(dependee)) {
787 ResourceMark rm;
788 nm->mark_for_deoptimization();
789 number_of_marked_CodeBlobs++;
790 }
791 }
793 return number_of_marked_CodeBlobs;
794 }
796 void CodeCache::make_marked_nmethods_zombies() {
797 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
798 FOR_ALL_ALIVE_NMETHODS(nm) {
799 if (nm->is_marked_for_deoptimization()) {
801 // If the nmethod has already been made non-entrant and it can be converted,
802 // then zombie it now. Otherwise make it non-entrant and it will eventually
803 // be zombied when it is no longer seen on the stack. Note that the nmethod
804 // might be "entrant" and not on the stack, and so could be zombied immediately,
805 // but we can't tell because we don't track it on the stack until it becomes
806 // non-entrant.
808 if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
809 nm->make_zombie();
810 } else {
811 nm->make_not_entrant();
812 }
813 }
814 }
815 }
817 void CodeCache::make_marked_nmethods_not_entrant() {
818 assert_locked_or_safepoint(CodeCache_lock);
819 FOR_ALL_ALIVE_NMETHODS(nm) {
820 if (nm->is_marked_for_deoptimization()) {
821 nm->make_not_entrant();
822 }
823 }
824 }
826 void CodeCache::verify() {
827 _heap->verify();
828 FOR_ALL_ALIVE_BLOBS(p) {
829 p->verify();
830 }
831 }
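// Record that the code cache has filled up and, when the tracing framework
// has the event enabled, commit a CodeCacheFull event carrying a snapshot of
// the current code cache statistics.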
833 void CodeCache::report_codemem_full() {
834 _codemem_full_count++;
835 EventCodeCacheFull event;
836 if (event.should_commit()) {
837 event.set_startAddress((u8)low_bound());
838 event.set_commitedTopAddress((u8)high());
839 event.set_reservedTopAddress((u8)high_bound());
840 event.set_entryCount(nof_blobs());
841 event.set_methodCount(nof_nmethods());
842 event.set_adaptorCount(nof_adapters());
843 event.set_unallocatedCapacity(unallocated_capacity()/K);
844 event.set_fullCount(_codemem_full_count);
845 event.commit();
846 }
847 }
849 //------------------------------------------------------------------------------------------------
850 // Non-product version
852 #ifndef PRODUCT
854 void CodeCache::verify_if_often() {
855 if (VerifyCodeCacheOften) {
856 _heap->verify();
857 }
858 }
860 void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
861 if (PrintCodeCache2) { // Need to add a new flag
862 ResourceMark rm;
863 if (size == 0) size = cb->size();
864 tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
865 }
866 }
868 void CodeCache::print_internals() {
869 int nmethodCount = 0;
870 int runtimeStubCount = 0;
871 int adapterCount = 0;
872 int deoptimizationStubCount = 0;
873 int uncommonTrapStubCount = 0;
874 int bufferBlobCount = 0;
875 int total = 0;
876 int nmethodAlive = 0;
877 int nmethodNotEntrant = 0;
878 int nmethodZombie = 0;
879 int nmethodUnloaded = 0;
880 int nmethodJava = 0;
881 int nmethodNative = 0;
882 int maxCodeSize = 0;
883 ResourceMark rm;
885 CodeBlob *cb;
886 for (cb = first(); cb != NULL; cb = next(cb)) {
887 total++;
888 if (cb->is_nmethod()) {
889 nmethod* nm = (nmethod*)cb;
891 if (Verbose && nm->method() != NULL) {
892 ResourceMark rm;
893 char *method_name = nm->method()->name_and_sig_as_C_string();
894 tty->print("%s", method_name);
895 if(nm->is_alive()) { tty->print_cr(" alive"); }
896 if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
897 if(nm->is_zombie()) { tty->print_cr(" zombie"); }
898 }
900 nmethodCount++;
902 if(nm->is_alive()) { nmethodAlive++; }
903 if(nm->is_not_entrant()) { nmethodNotEntrant++; }
904 if(nm->is_zombie()) { nmethodZombie++; }
905 if(nm->is_unloaded()) { nmethodUnloaded++; }
906 if(nm->is_native_method()) { nmethodNative++; }
908 if(nm->method() != NULL && nm->is_java_method()) {
909 nmethodJava++;
910 if (nm->insts_size() > maxCodeSize) {
911 maxCodeSize = nm->insts_size();
912 }
913 }
914 } else if (cb->is_runtime_stub()) {
915 runtimeStubCount++;
916 } else if (cb->is_deoptimization_stub()) {
917 deoptimizationStubCount++;
918 } else if (cb->is_uncommon_trap_stub()) {
919 uncommonTrapStubCount++;
920 } else if (cb->is_adapter_blob()) {
921 adapterCount++;
922 } else if (cb->is_buffer_blob()) {
923 bufferBlobCount++;
924 }
925 }
927 int bucketSize = 512;
928 int bucketLimit = maxCodeSize / bucketSize + 1;
929 int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
930 memset(buckets,0,sizeof(int) * bucketLimit);
932 for (cb = first(); cb != NULL; cb = next(cb)) {
933 if (cb->is_nmethod()) {
934 nmethod* nm = (nmethod*)cb;
935 if(nm->is_java_method()) {
936 buckets[nm->insts_size() / bucketSize]++;
937 }
938 }
939 }
940 tty->print_cr("Code Cache Entries (total of %d)",total);
941 tty->print_cr("-------------------------------------------------");
942 tty->print_cr("nmethods: %d",nmethodCount);
943 tty->print_cr("\talive: %d",nmethodAlive);
944 tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
945 tty->print_cr("\tzombie: %d",nmethodZombie);
946 tty->print_cr("\tunloaded: %d",nmethodUnloaded);
947 tty->print_cr("\tjava: %d",nmethodJava);
948 tty->print_cr("\tnative: %d",nmethodNative);
949 tty->print_cr("runtime_stubs: %d",runtimeStubCount);
950 tty->print_cr("adapters: %d",adapterCount);
951 tty->print_cr("buffer blobs: %d",bufferBlobCount);
952 tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
953 tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
954 tty->print_cr("\nnmethod size distribution (non-zombie java)");
955 tty->print_cr("-------------------------------------------------");
957 for(int i=0; i<bucketLimit; i++) {
958 if(buckets[i] != 0) {
959 tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
960 tty->fill_to(40);
961 tty->print_cr("%d",buckets[i]);
962 }
963 }
965 FREE_C_HEAP_ARRAY(int, buckets, mtCode);
966 }
968 #endif // !PRODUCT
970 void CodeCache::print() {
971 print_summary(tty);
973 #ifndef PRODUCT
974 if (!Verbose) return;
976 CodeBlob_sizes live;
977 CodeBlob_sizes dead;
979 FOR_ALL_BLOBS(p) {
980 if (!p->is_alive()) {
981 dead.add(p);
982 } else {
983 live.add(p);
984 }
985 }
987 tty->print_cr("CodeCache:");
989 tty->print_cr("nmethod dependency checking time %f", dependentCheckTime.seconds(),
990 dependentCheckTime.seconds() / dependentCheckCount);
992 if (!live.is_empty()) {
993 live.print("live");
994 }
995 if (!dead.is_empty()) {
996 dead.print("dead");
997 }
1000 if (WizardMode) {
1001 // print the oop_map usage
1002 int code_size = 0;
1003 int number_of_blobs = 0;
1004 int number_of_oop_maps = 0;
1005 int map_size = 0;
1006 FOR_ALL_BLOBS(p) {
1007 if (p->is_alive()) {
1008 number_of_blobs++;
1009 code_size += p->code_size();
1010 OopMapSet* set = p->oop_maps();
1011 if (set != NULL) {
1012 number_of_oop_maps += set->size();
1013 map_size += set->heap_size();
1014 }
1015 }
1016 }
1017 tty->print_cr("OopMaps");
1018 tty->print_cr(" #blobs = %d", number_of_blobs);
1019 tty->print_cr(" code size = %d", code_size);
1020 tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
1021 tty->print_cr(" map size = %d", map_size);
1022 }
1024 #endif // !PRODUCT
1025 }
1027 void CodeCache::print_summary(outputStream* st, bool detailed) {
1028 size_t total = (_heap->high_boundary() - _heap->low_boundary());
1029 st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
1030 "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
1031 total/K, (total - unallocated_capacity())/K,
1032 maxCodeCacheUsed/K, unallocated_capacity()/K);
1034 if (detailed) {
1035 st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
1036 _heap->low_boundary(),
1037 _heap->high(),
1038 _heap->high_boundary());
1039 st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
1040 " adapters=" UINT32_FORMAT,
1041 nof_blobs(), nof_nmethods(), nof_adapters());
1042 st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
1043 "enabled" : Arguments::mode() == Arguments::_int ?
1044 "disabled (interpreter mode)" :
1045 "disabled (not enough contiguous free space left)");
1046 }
1047 }
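// Illustrative output of print_summary(st, true) (all numbers are made up
// and the addresses are elided; only the layout follows the format strings
// above):
//   CodeCache: size=49152Kb used=12288Kb max_used=13312Kb free=36864Kb
//    bounds [0x..., 0x..., 0x...]
//    total_blobs=5210 nmethods=4470 adapters=650
//    compilation: enabled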
1049 void CodeCache::log_state(outputStream* st) {
1050 st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
1051 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
1052 nof_blobs(), nof_nmethods(), nof_adapters(),
1053 unallocated_capacity());
1054 }