/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count                = 0;
    total_size           = 0;
    header_size          = 0;
    code_size            = 0;
    stub_size            = 0;
    relocation_size      = 0;
    scopes_oop_size      = 0;
    scopes_metadata_size = 0;
    scopes_data_size     = 0;
    scopes_pcs_size      = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size * 100 / total_size,
                  relocation_size * 100 / total_size,
                  code_size * 100 / total_size,
                  stub_size * 100 / total_size,
                  scopes_oop_size * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};
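
// Illustrative use of CodeBlob_sizes (a minimal sketch, assuming the caller
// already holds CodeCache_lock or is at a safepoint, mirroring how
// CodeCache::print() below builds its "live"/"dead" tallies):
//
//   CodeBlob_sizes sizes;
//   for (CodeBlob* cb = CodeCache::first(); cb != NULL; cb = CodeCache::next(cb)) {
//     sizes.add(cb);
//   }
//   if (!sizes.is_empty()) {
//     sizes.print("all blobs");   // one summary line on tty
//   }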

// CodeCache implementation

CodeHeap * CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob *cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

CodeBlob* CodeCache::allocate(int size) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
                    (address)_heap->end() - (address)_heap->begin());
    }
  }
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}
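
// Illustrative caller pattern (a sketch, not code from this file): CodeBlob
// subclasses take CodeCache_lock in their create() helpers and route their
// placement allocation through CodeCache::allocate(), so the raw storage and
// the subclass constructor run back to back under the lock. The names below
// (MyBlob, allocation_size) are hypothetical, for illustration only:
//
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   unsigned int size = allocation_size(cb, sizeof(MyBlob));  // hypothetical helper
//   MyBlob* blob = new (size) MyBlob(cb);   // operator new calls CodeCache::allocate(size)
//   if (blob == NULL) { /* code cache is full; caller must cope */ }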


void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)          for (CodeBlob *var = first(); var != NULL; var = next(var))
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob *var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod *var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
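
// Usage sketch (illustrative): inside CodeCache member functions the macros
// declare and advance the loop variable themselves, e.g.
//
//   FOR_ALL_ALIVE_NMETHODS(nm) {
//     nm->clear_inline_caches();
//   }
//
// expands to roughly
//
//   for (nmethod* nm = alive_nmethod(first()); nm != NULL; nm = alive_nmethod(next(nm))) {
//     nm->clear_inline_caches();
//   }
//
// so every use still requires CodeCache_lock to be held or a safepoint,
// because first()/next() assert that themselves.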


bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}


// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}
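
// Illustrative picture of the structure maintained above and below (a sketch,
// not code from this file): the scavenge-root nmethods form a singly linked
// list threaded through each nmethod's scavenge_root_link(), with new entries
// pushed at the head:
//
//   _scavenge_root_nmethods -> nmC -> nmB -> nmA -> NULL
//
// add_scavenge_root_nmethod() is an O(1) head insertion, while
// drop_scavenge_root_nmethod() and prune_scavenge_root_nmethods() walk the
// list with a trailing 'last' pointer so entries can be unlinked in place.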

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f)  f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT


nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved->is_in_use() && saved->method() == m) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected\n");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
        xtty->method(m);
        xtty->stamp();
        xtty->end_elem();
      }
      return saved;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  return NULL;
}

void CodeCache::remove_saved_code(nmethod* nm) {
  // For the concurrent sweeper this will be called with CodeCache_lock taken by the caller
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved == nm) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  ShouldNotReachHere();
}

void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(nm->method());
    xtty->stamp();
    xtty->end_elem();
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}
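
// Illustrative lifecycle of the _saved_nmethods list (a sketch of how the
// three functions above cooperate, not code from this file):
//
//   CodeCache::speculatively_disconnect(nm);   // push nm on _saved_nmethods and
//                                              // detach it from its Method*
//   ...
//   nmethod* again = CodeCache::find_and_remove_saved_code(m);
//   // 'again' == nm if m needs compiled code again before the sweeper flushes
//   // it; otherwise remove_saved_code(nm) is called when the disconnected
//   // nmethod is finally flushed, and the list entry disappears with it.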


void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}


void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");

#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      RelocIterator iter((nmethod*)cb);
      while (iter.next()) {
        if (iter.type() == relocInfo::virtual_call_type) {
          if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
            CompiledIC *ic = CompiledIC_at(iter.reloc());
            if (TraceCompiledIC) {
              tty->print("noticed icholder " INTPTR_FORMAT " ", ic->cached_icholder());
              ic->print();
            }
            assert(ic->cached_icholder() != NULL, "must be non-NULL");
            count++;
          }
        }
      }
    }
  }

  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}


void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->begin();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->end();
}


void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused failures; instead, round
  // the code cache sizes to the page size. In particular, Solaris is moving to a
  // larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize   = round_to(InitialCodeCacheSize,   os::vm_page_size());
  ReservedCodeCacheSize  = round_to(ReservedCodeCacheSize,  os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}
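
// Worked example of the rounding above (illustrative numbers only): with a
// 4K page, round_to() rounds a size up to the next page-size multiple, e.g.
//
//   round_to(32*K + 1, 4*K) == 36*K   // 32769 -> 36864
//   round_to(48*M,     4*K) == 48*M   // already page aligned, unchanged
//
// so CodeCacheExpansionSize, InitialCodeCacheSize and ReservedCodeCacheSize
// are all whole numbers of pages before the heap is reserved.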


void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT


int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      Klass* d = str.klass();
      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}


#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP


// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}
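
// For orientation (a summary drawn from the comment above, not new policy):
// a deoptimized nmethod moves through the states
//
//   in_use --> not_entrant --> zombie --> flushed by the sweeper
//
// make_marked_nmethods_zombies() can skip straight to zombie only when the
// nmethod is already not_entrant and no activation can still be on a stack
// (can_not_entrant_be_converted()); otherwise it just makes the nmethod
// not_entrant and leaves the final transition to a later pass.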

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char *method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if(nm->is_alive()) { tty->print_cr(" alive"); }
        if(nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if(nm->is_zombie()) { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if(nm->is_alive()) { nmethodAlive++; }
      if(nm->is_not_entrant()) { nmethodNotEntrant++; }
      if(nm->is_zombie()) { nmethodZombie++; }
      if(nm->is_unloaded()) { nmethodUnloaded++; }
      if(nm->is_native_method()) { nmethodNative++; }

      if(nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if(nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for(int i = 0; i < bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
}
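
// Example of the histogram bucketing above (illustrative numbers only): with
// bucketSize = 512, an nmethod whose insts_size() is 1300 bytes lands in
// bucket 1300 / 512 == 2, which is reported as the "1024 - 1536 bytes" row;
// maxCodeSize / bucketSize + 1 buckets are allocated, so the largest Java
// method always has a slot.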

void CodeCache::print() {
  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");
945 tty->print_cr("nmethod dependency checking time %f", dependentCheckTime.seconds(),
946 dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }


  if (Verbose) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

}

#endif // PRODUCT

void CodeCache::print_bounds(outputStream* st) {
  st->print_cr("Code Cache  [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               _heap->low_boundary(),
               _heap->high(),
               _heap->high_boundary());
  st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
               " adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT "Kb"
               " largest_free_block=" SIZE_FORMAT,
               nof_blobs(), nof_nmethods(), nof_adapters(),
               unallocated_capacity()/K, largest_free_block());
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'"
            " largest_free_block='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity(), largest_free_block());
}

size_t CodeCache::largest_free_block() {
  // This is called both with and without CodeCache_lock held so
  // handle both cases.
  if (CodeCache_lock->owned_by_self()) {
    return _heap->largest_free_block();
  } else {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    return _heap->largest_free_block();
  }
}