Wed, 28 Feb 2018 05:31:04 +0000
8078628: linux-zero does not build without precompiled header
Summary: add missing includes
Reviewed-by: coleenp, stefank, sgehwolf, dholmes
/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/gcLocker.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/icache.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "services/memoryService.hpp"
#include "trace/tracing.hpp"
#include "utilities/xmlstream.hpp"

// Helper class for printing in CodeCache
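// Accumulates per-blob size statistics (header, relocation, instructions,
// stubs, scopes data) and prints each as a percentage of the total. Used by
// CodeCache::print() below for the live/dead breakdown in Verbose mode.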
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count            = 0;
    total_size       = 0;
    header_size      = 0;
    code_size        = 0;
    stub_size        = 0;
    relocation_size  = 0;
    scopes_oop_size  = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size  = 0;
  }

  int total()     { return total_size; }
  bool is_empty() { return count == 0; }

  void print(const char* title) {
    tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])",
                  count,
                  title,
                  (int)(total() / K),
                  header_size          * 100 / total_size,
                  relocation_size      * 100 / total_size,
                  code_size            * 100 / total_size,
                  stub_size            * 100 / total_size,
                  scopes_oop_size      * 100 / total_size,
                  scopes_metadata_size * 100 / total_size,
                  scopes_data_size     * 100 / total_size,
                  scopes_pcs_size      * 100 / total_size);
  }

  void add(CodeBlob* cb) {
    count++;
    total_size      += cb->size();
    header_size     += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size      += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size     += nm->scopes_data_size();
      scopes_pcs_size      += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// CodeCache implementation

CodeHeap* CodeCache::_heap = new CodeHeap();
int CodeCache::_number_of_blobs = 0;
int CodeCache::_number_of_adapters = 0;
int CodeCache::_number_of_nmethods = 0;
int CodeCache::_number_of_nmethods_with_dependencies = 0;
bool CodeCache::_needs_cache_clean = false;
nmethod* CodeCache::_scavenge_root_nmethods = NULL;

int CodeCache::_codemem_full_count = 0;
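
// Iteration primitives over the underlying CodeHeap. Callers must hold
// CodeCache_lock or be at a safepoint (asserted in each helper).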
CodeBlob* CodeCache::first() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->first();
}

CodeBlob* CodeCache::next(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  return (CodeBlob*)_heap->next(cb);
}

CodeBlob* CodeCache::alive(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && !cb->is_alive()) cb = next(cb);
  return cb;
}

nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  while (cb != NULL && (!cb->is_alive() || !cb->is_nmethod())) cb = next(cb);
  return (nmethod*)cb;
}

nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}

nmethod* CodeCache::next_nmethod(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  cb = next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}
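
// High-water mark of code cache usage; reported as "max_used" by
// print_summary() below.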
static size_t maxCodeCacheUsed = 0;

CodeBlob* CodeCache::allocate(int size, bool is_critical) {
  // Do not seize the CodeCache lock here--if the caller has not
  // already done so, we are going to lose bigtime, since the code
  // cache will contain a garbage CodeBlob until the caller can
  // run the constructor for the CodeBlob subclass it is busy
  // instantiating.
  guarantee(size >= 0, "allocation request must be reasonable");
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = NULL;
  _number_of_blobs++;
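  // Retry loop: expand the heap by CodeCacheExpansionSize after each failed
  // allocation until it succeeds or the reservation cannot grow any further.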
  while (true) {
    cb = (CodeBlob*)_heap->allocate(size, is_critical);
    if (cb != NULL) break;
    if (!_heap->expand_by(CodeCacheExpansionSize)) {
      // Expansion failed
      return NULL;
    }
    if (PrintCodeCacheExtension) {
      ResourceMark rm;
      tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)",
                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
                    (address)_heap->high() - (address)_heap->low_boundary());
    }
  }
  maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
                          (address)_heap->low_boundary()) - unallocated_capacity());
  verify_if_often();
  print_trace("allocation", cb, size);
  return cb;
}

void CodeCache::free(CodeBlob* cb) {
  assert_locked_or_safepoint(CodeCache_lock);
  verify_if_often();

  print_trace("free", cb);
  if (cb->is_nmethod()) {
    _number_of_nmethods--;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies--;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters--;
  }
  _number_of_blobs--;

  _heap->deallocate(cb);

  verify_if_often();
  assert(_number_of_blobs >= 0, "sanity check");
}

void CodeCache::commit(CodeBlob* cb) {
  // this is called by nmethod::nmethod, which must already own CodeCache_lock
  assert_locked_or_safepoint(CodeCache_lock);
  if (cb->is_nmethod()) {
    _number_of_nmethods++;
    if (((nmethod *)cb)->has_dependencies()) {
      _number_of_nmethods_with_dependencies++;
    }
  }
  if (cb->is_adapter_blob()) {
    _number_of_adapters++;
  }

  // flush the hardware I-cache
  ICache::invalidate_range(cb->content_begin(), cb->content_size());
}

void CodeCache::flush() {
  assert_locked_or_safepoint(CodeCache_lock);
  Unimplemented();
}

// Iteration over CodeBlobs
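// The ALIVE variants skip blobs for which is_alive() is false
// (i.e. zombie or unloaded nmethods).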
#define FOR_ALL_BLOBS(var)          for (CodeBlob* var = first(); var != NULL; var = next(var))
#define FOR_ALL_ALIVE_BLOBS(var)    for (CodeBlob* var = alive(first()); var != NULL; var = alive(next(var)))
#define FOR_ALL_ALIVE_NMETHODS(var) for (nmethod* var = alive_nmethod(first()); var != NULL; var = alive_nmethod(next(var)))

bool CodeCache::contains(void *p) {
  // It should be ok to call contains without holding a lock
  return _heap->contains(p);
}

// This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non-entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}

nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob* cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}

void CodeCache::blobs_do(void f(CodeBlob* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(p) {
    f(p);
  }
}

void CodeCache::nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_BLOBS(nm) {
    if (nm->is_nmethod()) f((nmethod*)nm);
  }
}

void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    f(nm);
  }
}

int CodeCache::alignment_unit() {
  return (int)_heap->alignment_unit();
}

int CodeCache::alignment_offset() {
  return (int)_heap->alignment_offset();
}

// Mark nmethods for unloading if they contain otherwise unreachable
// oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->do_unloading(is_alive, unloading_occurred);
  }
}

void CodeCache::blobs_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_BLOBS(cb) {
    f->do_code_blob(cb);

#ifdef ASSERT
    if (cb->is_nmethod())
      ((nmethod*)cb)->verify_scavenge_root_oops();
#endif //ASSERT
  }
}

// Walk the list of methods which might contain non-perm oops.
void CodeCache::scavenge_root_nmethods_do(CodeBlobClosure* f) {
  assert_locked_or_safepoint(CodeCache_lock);
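
  // With G1 the scavenge-root list is not maintained; G1 tracks nmethod
  // roots in per-region code root sets instead.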
  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
#ifndef PRODUCT
    if (TraceScavenge) {
      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
    }
#endif //PRODUCT
    if (is_live) {
      // Perform cur->oops_do(f), maybe just once per nmethod.
      f->do_code_blob(cur);
    }
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

void CodeCache::add_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  nm->set_on_scavenge_root_list();
  nm->set_scavenge_root_link(_scavenge_root_nmethods);
  set_scavenge_root_nmethods(nm);
  print_trace("add_scavenge_root", nm);
}

void CodeCache::drop_scavenge_root_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  print_trace("drop_scavenge_root", nm);
  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    if (cur == nm) {
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
      nm->set_scavenge_root_link(NULL);
      nm->clear_on_scavenge_root_list();
      return;
    }
    last = cur;
    cur = next;
  }
  assert(false, "should have been on list");
}

void CodeCache::prune_scavenge_root_nmethods() {
  assert_locked_or_safepoint(CodeCache_lock);

  if (UseG1GC) {
    return;
  }

  debug_only(mark_scavenge_root_nmethods());

  nmethod* last = NULL;
  nmethod* cur = scavenge_root_nmethods();
  while (cur != NULL) {
    nmethod* next = cur->scavenge_root_link();
    debug_only(cur->clear_scavenge_root_marked());
    assert(cur->scavenge_root_not_marked(), "");
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");

    if (!cur->is_zombie() && !cur->is_unloaded()
        && cur->detect_scavenge_root_oops()) {
      // Keep it. Advance 'last' to prevent deletion.
      last = cur;
    } else {
      // Prune it from the list, so we don't have to look at it any more.
      print_trace("prune_scavenge_root", cur);
      cur->set_scavenge_root_link(NULL);
      cur->clear_on_scavenge_root_list();
      if (last != NULL) {
        last->set_scavenge_root_link(next);
      } else {
        set_scavenge_root_nmethods(next);
      }
    }
    cur = next;
  }

  // Check for stray marks.
  debug_only(verify_perm_nmethods(NULL));
}

#ifndef PRODUCT
void CodeCache::asserted_non_scavengable_nmethods_do(CodeBlobClosure* f) {
  if (UseG1GC) {
    return;
  }

  // While we are here, verify the integrity of the list.
  mark_scavenge_root_nmethods();
  for (nmethod* cur = scavenge_root_nmethods(); cur != NULL; cur = cur->scavenge_root_link()) {
    assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
    cur->clear_scavenge_root_marked();
  }
  verify_perm_nmethods(f);
}

// Temporarily mark nmethods that are claimed to be on the non-perm list.
void CodeCache::mark_scavenge_root_nmethods() {
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "clean state");
      if (nm->on_scavenge_root_list())
        nm->set_scavenge_root_marked();
    }
  }
}

// If the closure is given, run it on the unlisted nmethods.
// Also make sure that the effects of mark_scavenge_root_nmethods are gone.
void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
  FOR_ALL_ALIVE_BLOBS(cb) {
    bool call_f = (f_or_null != NULL);
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      assert(nm->scavenge_root_not_marked(), "must be already processed");
      if (nm->on_scavenge_root_list())
        call_f = false;  // don't show this one to the client
      nm->verify_scavenge_root_oops();
    } else {
      call_f = false;  // not an nmethod
    }
    if (call_f) f_or_null->do_code_blob(cb);
  }
}
#endif //PRODUCT

void CodeCache::verify_clean_inline_caches() {
#ifdef ASSERT
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      assert(!nm->is_unloaded(), "Tautology");
      nm->verify_clean_inline_caches();
      nm->verify();
    }
  }
#endif
}

void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
  // make sure that we aren't leaking icholders
  int count = 0;
  FOR_ALL_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      count += nm->verify_icholder_relocations();
    }
  }
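
  // Every live CompiledICHolder must be accounted for: referenced from an
  // nmethod's relocations, pending release in the InlineCacheBuffer, or
  // allocated but not yet claimed by an inline cache.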
  assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
         CompiledICHolder::live_count(), "must agree");
#endif
}

void CodeCache::gc_prologue() {
}

void CodeCache::gc_epilogue() {
  assert_locked_or_safepoint(CodeCache_lock);
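  // In debug builds the outer condition disappears (NOT_DEBUG), so every
  // alive nmethod is visited and verified; inline-cache cleanup itself is
  // still gated on needs_cache_clean() (DEBUG_ONLY). In product builds the
  // whole loop is skipped unless a cleaning pass was requested.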
  NOT_DEBUG(if (needs_cache_clean())) {
    FOR_ALL_ALIVE_BLOBS(cb) {
      if (cb->is_nmethod()) {
        nmethod *nm = (nmethod*)cb;
        assert(!nm->is_unloaded(), "Tautology");
        DEBUG_ONLY(if (needs_cache_clean())) {
          nm->cleanup_inline_caches();
        }
        DEBUG_ONLY(nm->verify());
        DEBUG_ONLY(nm->verify_oop_relocations());
      }
    }
  }
  set_needs_cache_clean(false);
  prune_scavenge_root_nmethods();

  verify_icholder_relocations();
}

void CodeCache::verify_oops() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  VerifyOopClosure voc;
  FOR_ALL_ALIVE_BLOBS(cb) {
    if (cb->is_nmethod()) {
      nmethod *nm = (nmethod*)cb;
      nm->oops_do(&voc);
      nm->verify_oop_relocations();
    }
  }
}

address CodeCache::first_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->low_boundary();
}

address CodeCache::last_address() {
  assert_locked_or_safepoint(CodeCache_lock);
  return (address)_heap->high();
}

/**
 * Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache
 * is free, reverse_free_ratio() returns 4.
 */
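// Note: the ratio is computed against the capacity above
// CodeCacheMinimumFreeSpace, so it grows without bound as the cache
// approaches that floor.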
double CodeCache::reverse_free_ratio() {
  double unallocated_capacity = (double)(CodeCache::unallocated_capacity() - CodeCacheMinimumFreeSpace);
  double max_capacity = (double)CodeCache::max_capacity();
  return max_capacity / unallocated_capacity;
}

void icache_init();

void CodeCache::initialize() {
  assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points");
#ifdef COMPILER2
  assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment,  "CodeCacheSegmentSize must be large enough to align inner loops");
#endif
  assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants");
  // This was originally just an alignment check that caused failures; instead,
  // round the code cache sizes up to the page size. In particular, Solaris is
  // moving to a larger default page size.
  CodeCacheExpansionSize = round_to(CodeCacheExpansionSize, os::vm_page_size());
  InitialCodeCacheSize = round_to(InitialCodeCacheSize, os::vm_page_size());
  ReservedCodeCacheSize = round_to(ReservedCodeCacheSize, os::vm_page_size());
  if (!_heap->reserve(ReservedCodeCacheSize, InitialCodeCacheSize, CodeCacheSegmentSize)) {
    vm_exit_during_initialization("Could not reserve enough space for code cache");
  }

  MemoryService::add_code_heap_memory_pool(_heap);

  // Initialize ICache flush mechanism
  // This service is needed for os::register_code_area
  icache_init();

  // Give OS a chance to register generated code area.
  // This is used on Windows 64 bit platforms to register
  // Structured Exception Handlers for our generated code.
  os::register_code_area(_heap->low_boundary(), _heap->high_boundary());
}

void codeCache_init() {
  CodeCache::initialize();
}

//------------------------------------------------------------------------------------------------

int CodeCache::number_of_nmethods_with_dependencies() {
  return _number_of_nmethods_with_dependencies;
}

void CodeCache::clear_inline_caches() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    nm->clear_inline_caches();
  }
}

#ifndef PRODUCT
// Used to keep track of how much time is spent in mark_for_deoptimization.
static elapsedTimer dependentCheckTime;
static int dependentCheckCount = 0;
#endif // PRODUCT

int CodeCache::mark_for_deoptimization(DepChange& changes) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

#ifndef PRODUCT
  dependentCheckTime.start();
  dependentCheckCount++;
#endif // PRODUCT

  int number_of_marked_CodeBlobs = 0;

  // search the hierarchy looking for nmethods which are affected by the loading of this class

  // then search the interfaces this class implements looking for nmethods
  // which might be dependent on the fact that an interface only had one
  // implementor.

  { No_Safepoint_Verifier nsv;
    for (DepChange::ContextStream str(changes, nsv); str.next(); ) {
      Klass* d = str.klass();
      number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes);
    }
  }

  if (VerifyDependencies) {
    // Turn off dependency tracing while actually testing deps.
    NOT_PRODUCT( FlagSetting fs(TraceDependencies, false) );
    FOR_ALL_ALIVE_NMETHODS(nm) {
      if (!nm->is_marked_for_deoptimization() &&
          nm->check_all_dependencies()) {
        ResourceMark rm;
        tty->print_cr("Should have been marked for deoptimization:");
        changes.print();
        nm->print();
        nm->print_dependencies();
      }
    }
  }

#ifndef PRODUCT
  dependentCheckTime.stop();
#endif // PRODUCT

  return number_of_marked_CodeBlobs;
}

#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  // Deoptimize all methods of the evolving class itself
  Array<Method*>* old_methods = dependee->methods();
  for (int i = 0; i < old_methods->length(); i++) {
    ResourceMark rm;
    Method* old_method = old_methods->at(i);
    nmethod *nm = old_method->code();
    if (nm != NULL) {
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      // ...Already marked in the previous pass; don't count it again.
    } else if (nm->is_evol_dependent_on(dependee())) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    } else {
      // flush caches in case they refer to a redefined Method*
      nm->clear_inline_caches();
    }
  }

  return number_of_marked_CodeBlobs;
}
#endif // HOTSWAP

// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (!nm->method()->is_method_handle_intrinsic()) {
      nm->mark_for_deoptimization();
    }
  }
}

int CodeCache::mark_for_deoptimization(Method* dependee) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  int number_of_marked_CodeBlobs = 0;

  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_dependent_on_method(dependee)) {
      ResourceMark rm;
      nm->mark_for_deoptimization();
      number_of_marked_CodeBlobs++;
    }
  }

  return number_of_marked_CodeBlobs;
}

void CodeCache::make_marked_nmethods_not_entrant() {
  assert_locked_or_safepoint(CodeCache_lock);
  FOR_ALL_ALIVE_NMETHODS(nm) {
    if (nm->is_marked_for_deoptimization()) {
      nm->make_not_entrant();
    }
  }
}

void CodeCache::verify() {
  _heap->verify();
  FOR_ALL_ALIVE_BLOBS(p) {
    p->verify();
  }
}
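
// Note: the JFR event setter names below (e.g. set_commitedTopAddress,
// set_adaptorCount) follow the generated event API's spelling and are
// intentionally left as-is.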
void CodeCache::report_codemem_full() {
  _codemem_full_count++;
  EventCodeCacheFull event;
  if (event.should_commit()) {
    event.set_startAddress((u8)low_bound());
    event.set_commitedTopAddress((u8)high());
    event.set_reservedTopAddress((u8)high_bound());
    event.set_entryCount(nof_blobs());
    event.set_methodCount(nof_nmethods());
    event.set_adaptorCount(nof_adapters());
    event.set_unallocatedCapacity(unallocated_capacity()/K);
    event.set_fullCount(_codemem_full_count);
    event.commit();
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::verify_if_often() {
  if (VerifyCodeCacheOften) {
    _heap->verify();
  }
}

void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) {
  if (PrintCodeCache2) {  // Need to add a new flag
    ResourceMark rm;
    if (size == 0) size = cb->size();
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int adapterCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodAlive = 0;
  int nmethodNotEntrant = 0;
  int nmethodZombie = 0;
  int nmethodUnloaded = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int maxCodeSize = 0;
  ResourceMark rm;

  CodeBlob *cb;
  for (cb = first(); cb != NULL; cb = next(cb)) {
    total++;
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;

      if (Verbose && nm->method() != NULL) {
        ResourceMark rm;
        char *method_name = nm->method()->name_and_sig_as_C_string();
        tty->print("%s", method_name);
        if (nm->is_alive()) { tty->print_cr(" alive"); }
        if (nm->is_not_entrant()) { tty->print_cr(" not-entrant"); }
        if (nm->is_zombie()) { tty->print_cr(" zombie"); }
      }

      nmethodCount++;

      if (nm->is_alive()) { nmethodAlive++; }
      if (nm->is_not_entrant()) { nmethodNotEntrant++; }
      if (nm->is_zombie()) { nmethodZombie++; }
      if (nm->is_unloaded()) { nmethodUnloaded++; }
      if (nm->is_native_method()) { nmethodNative++; }

      if (nm->method() != NULL && nm->is_java_method()) {
        nmethodJava++;
        if (nm->insts_size() > maxCodeSize) {
          maxCodeSize = nm->insts_size();
        }
      }
    } else if (cb->is_runtime_stub()) {
      runtimeStubCount++;
    } else if (cb->is_deoptimization_stub()) {
      deoptimizationStubCount++;
    } else if (cb->is_uncommon_trap_stub()) {
      uncommonTrapStubCount++;
    } else if (cb->is_adapter_blob()) {
      adapterCount++;
    } else if (cb->is_buffer_blob()) {
      bufferBlobCount++;
    }
  }
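
  // Build a histogram of nmethod instruction sizes in 512-byte buckets.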
  int bucketSize = 512;
  int bucketLimit = maxCodeSize / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  for (cb = first(); cb != NULL; cb = next(cb)) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      if (nm->is_java_method()) {
        buckets[nm->insts_size() / bucketSize]++;
      }
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)", total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d", nmethodCount);
  tty->print_cr("\talive: %d", nmethodAlive);
  tty->print_cr("\tnot_entrant: %d", nmethodNotEntrant);
  tty->print_cr("\tzombie: %d", nmethodZombie);
  tty->print_cr("\tunloaded: %d", nmethodUnloaded);
  tty->print_cr("\tjava: %d", nmethodJava);
  tty->print_cr("\tnative: %d", nmethodNative);
  tty->print_cr("runtime_stubs: %d", runtimeStubCount);
  tty->print_cr("adapters: %d", adapterCount);
  tty->print_cr("buffer blobs: %d", bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d", deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d", uncommonTrapStubCount);
  tty->print_cr("\nnmethod size distribution (non-zombie java)");
  tty->print_cr("-------------------------------------------------");

  for (int i = 0; i < bucketLimit; i++) {
    if (buckets[i] != 0) {
      tty->print("%d - %d bytes", i * bucketSize, (i + 1) * bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d", buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets, mtCode);
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live;
  CodeBlob_sizes dead;

  FOR_ALL_BLOBS(p) {
    if (!p->is_alive()) {
      dead.add(p);
    } else {
      live.add(p);
    }
  }

  tty->print_cr("CodeCache:");

  tty->print_cr("nmethod dependency checking time %f, per dependent %f", dependentCheckTime.seconds(),
                dependentCheckTime.seconds() / dependentCheckCount);

  if (!live.is_empty()) {
    live.print("live");
  }
  if (!dead.is_empty()) {
    dead.print("dead");
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_BLOBS(p) {
      if (p->is_alive()) {
        number_of_blobs++;
        code_size += p->code_size();
        OopMapSet* set = p->oop_maps();
        if (set != NULL) {
          number_of_oop_maps += set->size();
          map_size           += set->heap_size();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr("  #blobs    = %d", number_of_blobs);
    tty->print_cr("  code size = %d", code_size);
    tty->print_cr("  #oop_maps = %d", number_of_oop_maps);
    tty->print_cr("  map size  = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  size_t total = (_heap->high_boundary() - _heap->low_boundary());
  st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
               total/K, (total - unallocated_capacity())/K,
               maxCodeCacheUsed/K, unallocated_capacity()/K);

  if (detailed) {
    st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                 p2i(_heap->low_boundary()),
                 p2i(_heap->high()),
                 p2i(_heap->high_boundary()));
    st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT
                 " adapters=" UINT32_FORMAT,
                 nof_blobs(), nof_nmethods(), nof_adapters());
    st->print_cr(" compilation: %s", CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)");
  }
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
            nof_blobs(), nof_nmethods(), nof_adapters(),
            unallocated_capacity());
}