Mon, 18 Mar 2013 13:19:06 +0100
8008555: Debugging code in compiled method sometimes leaks memory
Summary: support for strings that have same life-time as code that uses them.
Reviewed-by: kvn, twisti
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "code/codeCache.hpp"
27 #include "code/compiledIC.hpp"
28 #include "code/icBuffer.hpp"
29 #include "code/nmethod.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "oops/method.hpp"
33 #include "runtime/atomic.hpp"
34 #include "runtime/compilationPolicy.hpp"
35 #include "runtime/mutexLocker.hpp"
36 #include "runtime/os.hpp"
37 #include "runtime/sweeper.hpp"
38 #include "runtime/vm_operations.hpp"
39 #include "utilities/events.hpp"
40 #include "utilities/xmlstream.hpp"
#ifdef ASSERT

// SWEEP(nm) snapshots the nmethod's state into the sweeper's debug ring
// buffer, tagged with the source line of the call site (debug builds only;
// compiles away in product builds — see the #else branch below).
#define SWEEP(nm) record_sweep(nm, __LINE__)
// Sweeper logging code
46 class SweeperRecord {
47 public:
48 int traversal;
49 int invocation;
50 int compile_id;
51 long traversal_mark;
52 int state;
53 const char* kind;
54 address vep;
55 address uep;
56 int line;
58 void print() {
59 tty->print_cr("traversal = %d invocation = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
60 PTR_FORMAT " state = %d traversal_mark %d line = %d",
61 traversal,
62 invocation,
63 compile_id,
64 kind == NULL ? "" : kind,
65 uep,
66 vep,
67 state,
68 traversal_mark,
69 line);
70 }
71 };
// Ring buffer of SweeperRecords (SweeperLogEntries slots) and the index of
// the next slot to overwrite. The buffer is allocated lazily in
// possibly_sweep() when LogSweeper is enabled; NULL means logging is off.
static int _sweep_index = 0;
static SweeperRecord* _records = NULL;
76 void NMethodSweeper::report_events(int id, address entry) {
77 if (_records != NULL) {
78 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
79 if (_records[i].uep == entry ||
80 _records[i].vep == entry ||
81 _records[i].compile_id == id) {
82 _records[i].print();
83 }
84 }
85 for (int i = 0; i < _sweep_index; i++) {
86 if (_records[i].uep == entry ||
87 _records[i].vep == entry ||
88 _records[i].compile_id == id) {
89 _records[i].print();
90 }
91 }
92 }
93 }
95 void NMethodSweeper::report_events() {
96 if (_records != NULL) {
97 for (int i = _sweep_index; i < SweeperLogEntries; i++) {
98 // skip empty records
99 if (_records[i].vep == NULL) continue;
100 _records[i].print();
101 }
102 for (int i = 0; i < _sweep_index; i++) {
103 // skip empty records
104 if (_records[i].vep == NULL) continue;
105 _records[i].print();
106 }
107 }
108 }
110 void NMethodSweeper::record_sweep(nmethod* nm, int line) {
111 if (_records != NULL) {
112 _records[_sweep_index].traversal = _traversals;
113 _records[_sweep_index].traversal_mark = nm->_stack_traversal_mark;
114 _records[_sweep_index].invocation = _invocations;
115 _records[_sweep_index].compile_id = nm->compile_id();
116 _records[_sweep_index].kind = nm->compile_kind();
117 _records[_sweep_index].state = nm->_state;
118 _records[_sweep_index].vep = nm->verified_entry_point();
119 _records[_sweep_index].uep = nm->entry_point();
120 _records[_sweep_index].line = line;
122 _sweep_index = (_sweep_index + 1) % SweeperLogEntries;
123 }
124 }
#else
// Product builds: sweeper event recording compiles away to nothing.
#define SWEEP(nm)
#endif
long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
int NMethodSweeper::_seen = 0 ; // No. of nmethods we have currently processed in current pass of CodeCache

volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass
volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.

jint NMethodSweeper::_locked_seen = 0; // No. of VM-locked, non-alive nmethods seen in the current pass
jint NMethodSweeper::_not_entrant_seen_on_stack = 0; // No. of not-entrant nmethods still on stack in the current pass
bool NMethodSweeper::_rescan = false; // Request another full scan of the code cache from the beginning
bool NMethodSweeper::_do_sweep = false; // Set by scan_stacks(); gates possibly_sweep()
bool NMethodSweeper::_was_full = false; // Code cache filled up at some point; compilation may be stopped
jint NMethodSweeper::_advise_to_sweep = 0; // Set via cmpxchg in handle_full_code_cache() to request early cleaning
jlong NMethodSweeper::_last_was_full = 0; // Time (ms) the code cache last became full
uint NMethodSweeper::_highest_marked = 0; // Highest compile id considered by speculative_disconnect_nmethods()
long NMethodSweeper::_was_full_traversal = 0; // _traversals value at the last speculative disconnect
147 class MarkActivationClosure: public CodeBlobClosure {
148 public:
149 virtual void do_code_blob(CodeBlob* cb) {
150 // If we see an activation belonging to a non_entrant nmethod, we mark it.
151 if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
152 ((nmethod*)cb)->mark_as_seen_on_stack();
153 }
154 }
155 };
156 static MarkActivationClosure mark_activation_closure;
158 void NMethodSweeper::scan_stacks() {
159 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
160 if (!MethodFlushing) return;
161 _do_sweep = true;
163 // No need to synchronize access, since this is always executed at a
164 // safepoint. If we aren't in the middle of scan and a rescan
165 // hasn't been requested then just return. If UseCodeCacheFlushing is on and
166 // code cache flushing is in progress, don't skip sweeping to help make progress
167 // clearing space in the code cache.
168 if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
169 _do_sweep = false;
170 return;
171 }
173 // Make sure CompiledIC_lock in unlocked, since we might update some
174 // inline caches. If it is, we just bail-out and try later.
175 if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
177 // Check for restart
178 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
179 if (_current == NULL) {
180 _seen = 0;
181 _invocations = NmethodSweepFraction;
182 _current = CodeCache::first_nmethod();
183 _traversals += 1;
184 if (PrintMethodFlushing) {
185 tty->print_cr("### Sweep: stack traversal %d", _traversals);
186 }
187 Threads::nmethods_do(&mark_activation_closure);
189 // reset the flags since we started a scan from the beginning.
190 _rescan = false;
191 _locked_seen = 0;
192 _not_entrant_seen_on_stack = 0;
193 }
195 if (UseCodeCacheFlushing) {
196 if (!CodeCache::needs_flushing()) {
197 // scan_stacks() runs during a safepoint, no race with setters
198 _advise_to_sweep = 0;
199 }
201 if (was_full()) {
202 // There was some progress so attempt to restart the compiler
203 jlong now = os::javaTimeMillis();
204 jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
205 jlong curr_interval = now - _last_was_full;
206 if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
207 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
208 set_was_full(false);
210 // Update the _last_was_full time so we can tell how fast the
211 // code cache is filling up
212 _last_was_full = os::javaTimeMillis();
214 log_sweep("restart_compiler");
215 }
216 }
217 }
218 }
// Called by threads outside of a safepoint to perform one sweep step.
// At most one thread at a time actually sweeps: entry is guarded by a
// cmpxchg on _sweep_started; losers return immediately.
void NMethodSweeper::possibly_sweep() {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
  if ((!MethodFlushing) || (!_do_sweep)) return;

  if (_invocations > 0) {
    // Only one thread at a time will sweep
    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
    if (old != 0) {
      return;
    }
#ifdef ASSERT
    if (LogSweeper && _records == NULL) {
      // Create the ring buffer for the logging code
      _records = NEW_C_HEAP_ARRAY(SweeperRecord, SweeperLogEntries, mtGC);
      memset(_records, 0, sizeof(SweeperRecord) * SweeperLogEntries);
    }
#endif
    // Re-check under the "lock": another thread may have consumed the
    // last invocation between the first check above and our cmpxchg win.
    if (_invocations > 0) {
      sweep_code_cache();
      _invocations--;
    }
    _sweep_started = 0;
  }
}
// Sweep one fraction of the code cache: process roughly
// (remaining nmethods / remaining invocations) nmethods via
// process_nmethod(), yielding to safepoints as requested. Runs outside
// of a safepoint under CodeCache_lock, which is dropped around each
// nmethod's processing.
void NMethodSweeper::sweep_code_cache() {
#ifdef ASSERT
  jlong sweep_start; // only read below when PrintMethodFlushing is set
  if (PrintMethodFlushing) {
    sweep_start = os::javaTimeMillis();
  }
#endif
  if (PrintMethodFlushing && Verbose) {
    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
  }

  // We want to visit all nmethods after NmethodSweepFraction
  // invocations so divide the remaining number of nmethods by the
  // remaining number of invocations.  This is only an estimate since
  // the number of nmethods changes during the sweep, so the final
  // stage must iterate until there are no more nmethods.
  int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;

  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

    // The last invocation iterates until there are no more nmethods
    for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
      if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
        if (PrintMethodFlushing && Verbose) {
          tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
        }
        // Drop CodeCache_lock and block so the requested safepoint can
        // proceed; sweeping resumes where it left off afterwards.
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

        assert(Thread::current()->is_Java_thread(), "should be java thread");
        JavaThread* thread = (JavaThread*)Thread::current();
        ThreadBlockInVM tbivm(thread);
        thread->java_suspend_self();
      }
      // Since we will give up the CodeCache_lock, always skip ahead
      // to the next nmethod.  Other blobs can be deleted by other
      // threads but nmethods are only reclaimed by the sweeper.
      nmethod* next = CodeCache::next_nmethod(_current);

      // Now ready to process nmethod and give up CodeCache_lock
      {
        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        process_nmethod(_current);
      }
      _seen++;
      _current = next;
    }
  }

  assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");

  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
    // we've completed a scan without making progress but there were
    // nmethods we were unable to process either because they were
    // locked or were still on stack.  We don't have to aggressively
    // clean them up so just stop scanning.  We could scan once more
    // but that complicates the control logic and it's unlikely to
    // matter much.
    if (PrintMethodFlushing) {
      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
    }
  }

#ifdef ASSERT
  if(PrintMethodFlushing) {
    jlong sweep_end = os::javaTimeMillis();
    tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
  }
#endif

  if (_invocations == 1) {
    log_sweep("finished");
  }
}
323 class NMethodMarker: public StackObj {
324 private:
325 CompilerThread* _thread;
326 public:
327 NMethodMarker(nmethod* nm) {
328 _thread = CompilerThread::current();
329 if (!nm->is_zombie() && !nm->is_unloaded()) {
330 // Only expose live nmethods for scanning
331 _thread->set_scanned_nmethod(nm);
332 }
333 }
334 ~NMethodMarker() {
335 _thread->set_scanned_nmethod(NULL);
336 }
337 };
// Final reclamation of an nmethod: scrub any CompiledICHolders created for
// its virtual call sites (under CompiledIC_lock), then flush (free) the
// nmethod from the code cache under CodeCache_lock.
void NMethodSweeper::release_nmethod(nmethod *nm) {
  // Clean up any CompiledICHolders
  {
    ResourceMark rm;
    MutexLocker ml_patch(CompiledIC_lock);
    RelocIterator iter(nm);
    while (iter.next()) {
      if (iter.type() == relocInfo::virtual_call_type) {
        CompiledIC::cleanup_call_site(iter.virtual_call_reloc());
      }
    }
  }

  // Lock without safepoint check, matching the sweeper's other
  // CodeCache_lock acquisitions.
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  nm->flush();
}
// Process a single nmethod: advance it one step through the lifecycle
// alive -> not-entrant -> zombie -> marked-for-reclamation -> flushed,
// cleaning inline caches along the way. Sets _rescan whenever a state
// transition means another full pass is needed.
void NMethodSweeper::process_nmethod(nmethod *nm) {
  assert(!CodeCache_lock->owned_by_self(), "just checking");

  // Make sure this nmethod doesn't get unloaded during the scan,
  // since the locks acquired below might safepoint.
  NMethodMarker nmm(nm);

  SWEEP(nm);

  // Skip methods that are currently referenced by the VM
  if (nm->is_locked_by_vm()) {
    // But still remember to clean-up inline caches for alive nmethods
    if (nm->is_alive()) {
      // Clean-up all inline caches that point to zombie/non-entrant methods
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      SWEEP(nm);
    } else {
      // Locked and not alive: count it so sweep_code_cache() knows we
      // could not make progress on this nmethod.
      _locked_seen++;
      SWEEP(nm);
    }
    return;
  }

  if (nm->is_zombie()) {
    // The first time we see a zombie nmethod we mark it; the second time
    // we reclaim it. Once a zombie has been seen twice, we know that no
    // inline caches refer to it anymore.
    if (nm->is_marked_for_reclamation()) {
      assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
      }
      release_nmethod(nm);
    } else {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
      }
      nm->mark_for_reclamation();
      _rescan = true;
      SWEEP(nm);
    }
  } else if (nm->is_not_entrant()) {
    // If there are no current activations of this method on the
    // stack we can safely convert it to a zombie method
    if (nm->can_not_entrant_be_converted()) {
      if (PrintMethodFlushing && Verbose) {
        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
      }
      nm->make_zombie();
      _rescan = true;
      SWEEP(nm);
    } else {
      // Still alive, clean up its inline caches
      MutexLocker cl(CompiledIC_lock);
      nm->cleanup_inline_caches();
      // we couldn't transition this nmethod so don't immediately
      // request a rescan.  If this method stays on the stack for a
      // long time we don't want to keep rescanning the code cache.
      _not_entrant_seen_on_stack++;
      SWEEP(nm);
    }
  } else if (nm->is_unloaded()) {
    // Unloaded code, just make it a zombie
    if (PrintMethodFlushing && Verbose)
      tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
    if (nm->is_osr_method()) {
      SWEEP(nm);
      // No inline caches will ever point to osr methods, so we can just remove it
      release_nmethod(nm);
    } else {
      nm->make_zombie();
      _rescan = true;
      SWEEP(nm);
    }
  } else {
    assert(nm->is_alive(), "should be alive");

    if (UseCodeCacheFlushing) {
      // An alive nmethod that was speculatively disconnected
      // (method()->code() != nm), has not been re-entered for two
      // traversals since the forced cleanup, and whose compile id is
      // below the flush target is made not-entrant while the cache
      // still needs flushing.
      if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
          (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
          CodeCache::needs_flushing()) {
        // This method has not been called since the forced cleanup happened
        nm->make_not_entrant();
      }
    }

    // Clean-up all inline caches that point to zombie/non-entrant methods
    MutexLocker cl(CompiledIC_lock);
    nm->cleanup_inline_caches();
    SWEEP(nm);
  }
}
// Code cache unloading: when compilers notice the code cache is getting full,
// they will call a vm op that comes here. This code attempts to speculatively
// unload the oldest half of the nmethods (based on the compile job id) by
// saving the old code in a list in the CodeCache. Then
// execution resumes. If a method so marked is not called by the second sweeper
// stack traversal after the current one, the nmethod will be marked non-entrant and
// gotten rid of by normal sweeping. If the method is called, the Method*'s
// _code field is restored and the Method*/nmethod
// go back to their normal state.
void NMethodSweeper::handle_full_code_cache(bool is_full) {
  // Only the first one to notice can advise us to start early cleaning
  // (_advise_to_sweep is claimed atomically; it is reset by scan_stacks()
  // once the cache no longer needs flushing).
  if (!is_full){
    jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
    if (old != 0) {
      return;
    }
  }

  if (is_full) {
    // Since code cache is full, immediately stop new compiles
    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
    if (!did_set) {
      // only the first to notice can start the cleaning,
      // others will go back and block
      return;
    }
    set_was_full(true);

    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
    jlong now = os::javaTimeMillis();
    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
    jlong curr_interval = now - _last_was_full;
    if (curr_interval < max_interval) {
      _rescan = true;
      log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
                           curr_interval/1000);
      return;
    }
  }

  // Run the actual disconnect work in a VM operation
  // (VM_HandleFullCodeCache -> speculative_disconnect_nmethods).
  VM_HandleFullCodeCache op(is_full);
  VMThread::execute(&op);

  // rescan again as soon as possible
  _rescan = true;
}
// Speculatively disconnect the older in-use nmethods (by compile id) from
// their Method*, or make already-disconnected ones not-entrant, to free
// code cache space. Executed inside a VM operation (see
// handle_full_code_cache above).
void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
  // If there was a race in detecting full code cache, only run
  // one vm op for it or keep the compiler shut off
  debug_only(jlong start = os::javaTimeMillis();)

  if ((!was_full()) && (is_full)) {
    if (!CodeCache::needs_flushing()) {
      // The pressure subsided on its own: restart the compiler instead
      // of disconnecting anything.
      log_sweep("restart_compiler");
      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
      return;
    }
  }

  // Traverse the code cache trying to dump the oldest nmethods
  uint curr_max_comp_id = CompileBroker::get_compilation_id();
  // Flush target: midpoint between the previously marked id and the current
  // maximum, i.e. roughly the older half of the remaining id range.
  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
  log_sweep("start_cleaning");

  nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
  jint disconnected = 0;
  jint made_not_entrant  = 0;
  while ((nm != NULL)){
    uint curr_comp_id = nm->compile_id();

    // OSR methods cannot be flushed like this. Also, don't flush native methods
    // since they are part of the JDK in most cases
    if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
        (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {

      if ((nm->method()->code() == nm)) {
        // This method has not been previously considered for
        // unloading or it was restored already
        CodeCache::speculatively_disconnect(nm);
        disconnected++;
      } else if (nm->is_speculatively_disconnected()) {
        // This method was previously considered for preemptive unloading and was not called since then
        CompilationPolicy::policy()->delay_compilation(nm->method());
        nm->make_not_entrant();
        made_not_entrant++;
      }

      if (curr_comp_id > _highest_marked) {
        _highest_marked = curr_comp_id;
      }
    }
    nm = CodeCache::alive_nmethod(CodeCache::next(nm));
  }

  log_sweep("stop_cleaning",
                       "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
                       disconnected, made_not_entrant);

  // Shut off compiler. Sweeper will start over with a new stack scan and
  // traversal cycle and turn it back on if it clears enough space.
  if (was_full()) {
    _last_was_full = os::javaTimeMillis();
    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
  }

  // After two more traversals the sweeper will get rid of unrestored nmethods
  _was_full_traversal = _traversals;
#ifdef ASSERT
  jlong end = os::javaTimeMillis();
  if(PrintMethodFlushing && Verbose) {
    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
  }
#endif
}
568 // Print out some state information about the current sweep and the
569 // state of the code cache if it's requested.
570 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
571 if (PrintMethodFlushing) {
572 stringStream s;
573 // Dump code cache state into a buffer before locking the tty,
574 // because log_state() will use locks causing lock conflicts.
575 CodeCache::log_state(&s);
577 ttyLocker ttyl;
578 tty->print("### sweeper: %s ", msg);
579 if (format != NULL) {
580 va_list ap;
581 va_start(ap, format);
582 tty->vprint(format, ap);
583 va_end(ap);
584 }
585 tty->print_cr(s.as_string());
586 }
588 if (LogCompilation && (xtty != NULL)) {
589 stringStream s;
590 // Dump code cache state into a buffer before locking the tty,
591 // because log_state() will use locks causing lock conflicts.
592 CodeCache::log_state(&s);
594 ttyLocker ttyl;
595 xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
596 if (format != NULL) {
597 va_list ap;
598 va_start(ap, format);
599 xtty->vprint(format, ap);
600 va_end(ap);
601 }
602 xtty->print(s.as_string());
603 xtty->stamp();
604 xtty->end_elem();
605 }
606 }