Thu, 11 Apr 2013 13:57:44 +0200
8006952: Slow VM due to excessive code cache freelist iteration
Summary: Remove continuous free block requirement
Reviewed-by: kvn

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};
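
// Minimal usage sketch for CodeBlob_sizes (illustrative only, not part of this
// change): accumulate sizes over every blob and print one summary line. It
// relies on the FOR_ALL_BLOBS macro defined further down, and on the caller
// holding CodeCache_lock or being at a safepoint.
//
//   CodeBlob_sizes sizes;
//   FOR_ALL_BLOBS(cb) {
//     sizes.add(cb);
//   }
//   if (!sizes.is_empty()) sizes.print("all blobs");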

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}

CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}

CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}

nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

static size_t maxCodeCacheUsed = 0;

CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
                    (address)_heap->high() - (address)_heap->low_boundary());
    }
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}
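
// Hedged sketch of the expected call pattern for allocate() (the real call
// sites are the operator new bodies of the CodeBlob subclasses, which live
// elsewhere, so the names below are illustrative; is_critical is assumed to
// default to false in the declaration):
//
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   CodeBlob* p = CodeCache::allocate(blob_size);  // raw, uninitialized storage
//   if (p == NULL) { /* cache is full and expansion failed */ }
//   // The caller must run the subclass constructor (placement new) before
//   // releasing the lock, so nothing observes the garbage blob noted above.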

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod*)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}

// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)          for (CodeBlob* var = first();        var != NULL; var = next(var))
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob* var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod* var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
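
// Usage note (sketch, not part of this change): these macros expand to plain
// for-loops over the code heap, so like the helpers above they must run while
// holding CodeCache_lock or at a safepoint. For example, counting live nmethods:
//
//   int live_count = 0;
//   FOR_ALL_ALIVE_NMETHODS(nm) {
//     live_count++;
//   }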

bool CodeCache::contains(void* p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}
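
// Typical lookup (illustrative, assuming 'pc' is an instruction address taken
// from a Java frame): map the pc back to its enclosing blob.
//
//   CodeBlob* cb = CodeCache::find_blob((void*)pc);
//   if (cb != NULL && cb->is_nmethod()) {
//     nmethod* nm = (nmethod*)cb;
//     // nm->method() now identifies the compiled Java method
//   }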

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}
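
// Note on the list discipline (summarizing the two functions above):
// add_scavenge_root_nmethod() pushes at the head in O(1), while
// drop_scavenge_root_nmethod() walks from the head to find the predecessor,
// because scavenge_root_link() forms a singly linked list:
//
//   _scavenge_root_nmethods -> nm3 -> nm2 -> nm1 -> NULL
//
// Dropping nm2 rewires nm3's link to nm1, then clears nm2's link and flag.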

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT

void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f) f_or_null->do_code_blob(cb);
  }
}

#endif //PRODUCT

nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved->is_in_use() && saved->method() == m) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected\n");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
        xtty->method(m);
        xtty->stamp();
        xtty->end_elem();
      }
      return saved;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  return NULL;
}

void CodeCache::remove_saved_code(nmethod* nm) {
  // For the concurrent sweeper this will be called with the CodeCache_lock already taken by the caller
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved == nm) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  ShouldNotReachHere();
}

void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(nm->method());
    xtty->stamp();
    xtty->end_elem();
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}
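
// Lifecycle sketch for speculative disconnection (as implemented above and in
// find_and_remove_saved_code()/remove_saved_code() earlier in this file):
//
//   in_use nmethod --speculatively_disconnect()--> linked on _saved_nmethods,
//                                                  method()->code() cleared
//   method invoked again --find_and_remove_saved_code()--> reconnected and
//                                                  unlinked from the list
//   flushed by the sweeper --remove_saved_code()--> unlinked for good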

void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      RelocIterator iter((nmethod*)cb);
      while (iter.next()) {
        if (iter.type() == relocInfo::virtual_call_type) {
          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
            CompiledIC* ic = CompiledIC_at(iter.reloc());
            if (TraceCompiledIC) {
              tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
              ic->print();
            }
            assert(ic->cached_icholder() != NULL, "must be non-NULL");
            count++;
          }
        }
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}

address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}

address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that could fail; instead, round
  // the code cache sizes up to the page size. In particular, Solaris is moving
  // to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize   = round_to(InitialCodeCacheSize,   os::vm_page_size());
  ReservedCodeCacheSize  = round_to(ReservedCodeCacheSize,  os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize the ICache flush mechanism.
  // This service is needed for os::register_code_area.
  icache_init();

  // Give the OS a chance to register the generated code area.
  // This is used on 64-bit Windows platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}
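
// Worked example of the rounding above: with a 4K page size,
// round_to(50000000, 4096) yields 50003968, the next multiple of 4096, so an
// odd -XX:ReservedCodeCacheSize value is quietly page-aligned rather than
// rejected by an alignment check.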

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      Klass* d = str.klass();
      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing dependencies.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP

// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob* cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char* method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive()) { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie()) { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive()) { nmethodAlive++; }
      if (nm->is_not_entrant()) { nmethodNotEntrant++; }
      if (nm->is_zombie()) { nmethodZombie++; }
      if (nm->is_unloaded()) { nmethodUnloaded++; }
      if (nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int* buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);

  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");

  tty->print_cr("nmethod dependency checking time %f (%f per check)",
                dependentCheckTime.seconds(),
                dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  size_t total = (_heap->high_boundary() - _heap->low_boundary());
  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
               total/K, (total - unallocated_capacity())/K,
               maxCodeCacheUsed/K, unallocated_capacity()/K);

  if (detailed) {
    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                 _heap->low_boundary(),
                 _heap->high(),
                 _heap->high_boundary());
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}
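
// Example of the detailed summary produced above (all values illustrative):
//
//   CodeCache: size=49152Kb used=11264Kb max_used=11520Kb free=37888Kb
//    bounds [0x..., 0x..., 0x...]
//    total_blobs=4223 nmethods=3845 adapters=301
//    compilation: enabled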

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}