Fri, 06 May 2011 16:33:13 -0700
6939861: JVM should handle more conversion operations
Reviewed-by: twisti, jrose
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/methodOop.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache

class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  total() / K,
                  header_size      * 100 / total_size,
                  relocation_size  * 100 / total_size,
                  code_size        * 100 / total_size,
                  stub_size        * 100 / total_size,
                  scopes_oop_size  * 100 / total_size,
                  scopes_data_size * 100 / total_size,
                  scopes_pcs_size  * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size  += nm->oops_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size  += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};
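
// Usage sketch (illustrative only; mirrors what CodeCache::print() below
// does for the live/dead breakdown):
//
//   CodeBlob_sizes live;
//   FOR_ALL_BLOBS(p) {
//     if (p->is_alive())  live.add(p);
//   }
//   if (!live.is_empty())  live.print("live");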

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;
nmethod* CodeCache::_saved_nmethods = NULL;

CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}


CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}


CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}


nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

CodeBlob* CodeCache::allocate(int size) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass he is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
                    (address)_heap->end() - (address)_heap->begin());
    }
  }
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}
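
// Caller-contract sketch (hypothetical caller; SomeBlobSubclass and
// blob_size are illustrative names, not part of this file). The lock is
// taken before allocate() and held until the subclass constructor has run,
// so the heap never exposes a garbage CodeBlob:
//
//   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//   CodeBlob* p = CodeCache::allocate(blob_size);
//   if (p != NULL) {
//     new (p) SomeBlobSubclass(...);   // constructed under the same lock
//   }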

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}


void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}


void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}


// Iteration over CodeBlobs

#define FOR_ALL_BLOBS(var)          for (CodeBlob* var = first();               var != NULL; var = next(var))
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob* var = alive(first());        var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod*  var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))
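
// Expansion sketch: FOR_ALL_ALIVE_NMETHODS(nm) { ... } is equivalent to
//
//   for (nmethod* nm = alive_nmethod(first()); nm != NULL;
//        nm = alive_nmethod(next(nm))) { ... }
//
// i.e. a linear walk over the code heap that skips dead blobs and
// non-nmethods. Like first() and next(), these macros must only be used
// while holding the CodeCache_lock or at a safepoint.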

bool CodeCache::contains(void* p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}
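
// Lookup sketch (illustrative only): mapping an arbitrary code address,
// e.g. a pc harvested while walking a thread's stack, back to its blob:
//
//   CodeBlob* cb = CodeCache::find_blob(pc);
//   if (cb != NULL && cb->is_nmethod()) {
//     nmethod* nm = (nmethod*)cb;   // pc lies in compiled method code
//   }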

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}


void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}


void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}


int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}


int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}


// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive,
                             OopClosure* keep_alive,
                             bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, keep_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);
  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it.  Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL)
            last->set_scavenge_root_link(next);
      else  set_scavenge_root_nmethods(next);
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f)  f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT

nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved->is_in_use() && saved->method() == m) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
      saved->set_speculatively_disconnected(false);
      saved->set_saved_nmethod_link(NULL);
      if (PrintMethodFlushing) {
        saved->print_on(tty, " ### nmethod is reconnected\n");
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
        xtty->method(methodOop(m));
        xtty->stamp();
        xtty->end_elem();
      }
      return saved;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  return NULL;
}

void CodeCache::remove_saved_code(nmethod* nm) {
  // For the concurrent sweeper this will be called with the CodeCache_lock already taken by the caller
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
  nmethod* saved = _saved_nmethods;
  nmethod* prev = NULL;
  while (saved != NULL) {
    if (saved == nm) {
      if (prev != NULL) {
        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
      } else {
        _saved_nmethods = saved->saved_nmethod_link();
      }
      if (LogCompilation && (xtty != NULL)) {
        ttyLocker ttyl;
        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
        xtty->stamp();
        xtty->end_elem();
      }
      return;
    }
    prev = saved;
    saved = saved->saved_nmethod_link();
  }
  ShouldNotReachHere();
}

void CodeCache::speculatively_disconnect(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
  nm->set_saved_nmethod_link(_saved_nmethods);
  _saved_nmethods = nm;
  if (PrintMethodFlushing) {
    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
  }
  if (LogCompilation && (xtty != NULL)) {
    ttyLocker ttyl;
    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
    xtty->method(methodOop(nm->method()));
    xtty->stamp();
    xtty->end_elem();
  }
  nm->method()->clear_code();
  nm->set_speculatively_disconnected(true);
}
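
// Saved-nmethod list lifecycle (summary of the three functions above):
// speculatively_disconnect() pushes a live nmethod onto _saved_nmethods and
// detaches it from its methodOop; find_and_remove_saved_code() hands the
// nmethod back if its method is needed again before it is flushed;
// remove_saved_code() unlinks it when the sweeper finally flushes it.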

void CodeCache::gc_prologue() {
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
}


void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      if (needs_cache_clean()) {
        nm->cleanup_inline_caches();
      }
      DEBUG_ONLY(nm->verify());
      nm->fix_oop_relocations();
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();
  assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
}


void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}


address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->begin();
}


address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->end();
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just a check of the alignment, which could cause a
  // startup failure; instead, round the code cache sizes up to the page
  // size. In particular, Solaris is moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize   = round_to(InitialCodeCacheSize,   os::vm_page_size());
  ReservedCodeCacheSize  = round_to(ReservedCodeCacheSize,  os::vm_page_size());
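  // Worked example (illustrative, assuming a 4K page): round_to rounds up
  // to the next multiple of the page size, so a value of 1000000 becomes
  // 1003520 (245 pages), while already page-aligned values pass through
  // unchanged.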
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// used to keep track of how much time is spent in mark_for_deoptimization
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // Search the hierarchy looking for nmethods which are affected by the loading of this class.

  // Then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      klassOop d = str.klass();
      number_of_marked_CodeBlobs += instanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  objArrayOop old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    methodOop old_method = (methodOop) old_methods->obj_at(i);
    nmethod* nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined methodOop
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP

// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->mark_for_deoptimization();
  }
}


int CodeCache::mark_for_deoptimization(methodOop dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_zombies() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {

      // If the nmethod has already been made non-entrant and it can be converted
      // then zombie it now. Otherwise make it non-entrant and it will eventually
      // be zombied when it is no longer seen on the stack. Note that the nmethod
      // might be "entrant" and not on the stack and so could be zombied immediately
      // but we can't tell because we don't track it on stack until it becomes
      // non-entrant.

      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
        nm->make_zombie();
      } else {
        nm->make_not_entrant();
      }
    }
  }
}
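
// State sketch (summarizing the comment above): a marked nmethod moves
//
//   in_use --> not_entrant --> zombie --> (swept and freed)
//
// make_zombie() is only safe once no activations can remain on any stack;
// otherwise the nmethod waits in not_entrant until the sweeper proves the
// stack is clear.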

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0)  size = cb->size();
    tty->print_cr("CodeCache %s:  addr: " INTPTR_FORMAT ", size: 0x%x", event, cb, size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int ricochetStubCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob* cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char* method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive())       { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie())      { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive())         { nmethodAlive++; }
      if (nm->is_not_entrant())   { nmethodNotEntrant++; }
      if (nm->is_zombie())        { nmethodZombie++; }
      if (nm->is_unloaded())      { nmethodUnloaded++; }
      if (nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_ricochet_stub()) {
      ricochetStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }

  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int* buckets = NEW_C_HEAP_ARRAY(int, bucketLimit);
  memset(buckets, 0, sizeof(int) * bucketLimit);
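
  // Histogram sketch: bucket i counts java nmethods whose insts_size lies
  // in [i*bucketSize, (i+1)*bucketSize); e.g. with bucketSize == 512, a
  // 1300-byte method body lands in bucket 2 (1024..1535 bytes).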
  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }
  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("ricochet_stubs: %d", ricochetStubCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
}

void CodeCache::print() {
  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");

  tty->print_cr("nmethod dependency checking time %fs (%fs per check)",
                dependentCheckTime.seconds(),
                dependentCheckCount == 0 ? 0.0 : dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }


  if (Verbose) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

}

#endif // PRODUCT

void CodeCache::print_bounds(outputStream* st) {
  st->print_cr("Code Cache  [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               _heap->low_boundary(),
               _heap->high(),
               _heap->high_boundary());
  st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
               " adapters=" UINT32_FORMAT " free_code_cache=" SIZE_FORMAT "Kb"
               " largest_free_block=" SIZE_FORMAT,
               nof_blobs(), nof_nmethods(), nof_adapters(),
               unallocated_capacity()/K, largest_free_block());
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'"
            " largest_free_block='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity(), largest_free_block());
}

size_t CodeCache::largest_free_block() {
  // This is called both with and without CodeCache_lock held so
  // handle both cases.
  if (CodeCache_lock->owned_by_self()) {
    return _heap->largest_free_block();
  } else {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    return _heap->largest_free_block();
  }
}