Mon, 16 Jul 2012 14:05:34 -0400
7181989: NMT ON: Assertion failure when NMT checks thread's native stack base address
Summary: Assertion on stack base is not necessary
Reviewed-by: coleenp, dholmes, kvn
/*
 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memPtr.hpp"
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "utilities/decoder.hpp"
#include "utilities/globalDefinitions.hpp"

bool NMT_track_callsite = false;
// walk all 'known' threads at NMT sync point, and collect their recorders
void SyncThreadRecorderClosure::do_thread(Thread* thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
  if (thread->is_Java_thread()) {
    JavaThread* javaThread = (JavaThread*)thread;
    MemRecorder* recorder = javaThread->get_recorder();
    if (recorder != NULL) {
      MemTracker::enqueue_pending_recorder(recorder);
      javaThread->set_recorder(NULL);
    }
  }
  _thread_count ++;
}
MemRecorder* MemTracker::_global_recorder = NULL;
MemSnapshot* MemTracker::_snapshot = NULL;
MemBaseline MemTracker::_baseline;
Mutex MemTracker::_query_lock(Monitor::native, "NMT_queryLock");
volatile MemRecorder* MemTracker::_merge_pending_queue = NULL;
volatile MemRecorder* MemTracker::_pooled_recorders = NULL;
MemTrackWorker* MemTracker::_worker_thread = NULL;
int MemTracker::_sync_point_skip_count = 0;
MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off;
volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited;
MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
int MemTracker::_thread_count = 255;
volatile jint MemTracker::_pooled_recorder_count = 0;
debug_only(intx MemTracker::_main_thread_tid = 0;)
debug_only(volatile jint MemTracker::_pending_recorder_count = 0;)
void MemTracker::init_tracking_options(const char* option_line) {
  _tracking_level = NMT_off;
  if (strncmp(option_line, "=summary", 8) == 0) {
    _tracking_level = NMT_summary;
  } else if (strncmp(option_line, "=detail", 8) == 0) {
    _tracking_level = NMT_detail;
  }
}
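// (For reference: option_line is the tail of the NativeMemoryTracking command
//  line option as handed over by the argument parser, e.g. "=summary" or
//  "=detail"; any other value leaves tracking off.)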
// first phase of bootstrapping, when the VM is still in single-threaded mode.
void MemTracker::bootstrap_single_thread() {
  if (_tracking_level > NMT_off) {
    assert(_state == NMT_uninited, "wrong state");

    // NMT is not supported when UseMallocOnly is on. NMT can NOT
    // handle the amount of malloc data without significantly impacting
    // runtime performance when this flag is on.
    if (UseMallocOnly) {
      shutdown(NMT_use_malloc_only);
      return;
    }

    debug_only(_main_thread_tid = os::current_thread_id();)
    _state = NMT_bootstrapping_single_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}
// second phase of bootstrapping, when the VM is about to or has already entered multi-threaded mode.
void MemTracker::bootstrap_multi_thread() {
  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
    // create nmt lock for multi-thread execution
    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
    _state = NMT_bootstrapping_multi_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}
// fully start nmt
void MemTracker::start() {
  // native memory tracking is turned off by command line option
  if (_tracking_level == NMT_off || shutdown_in_progress()) return;

  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");

  _snapshot = new (std::nothrow)MemSnapshot();
  if (_snapshot != NULL && !_snapshot->out_of_memory()) {
    if (start_worker()) {
      _state = NMT_started;
      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
      return;
    }
  }

  // failed to start native memory tracking, shut it down
  shutdown(NMT_initialization);
}
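// Taken together, the bootstrap/start methods above drive the NMT lifecycle
// through the following states (see also shutdown(), final_shutdown() and
// sync() below):
//   NMT_uninited -> NMT_bootstrapping_single_thread
//     -> NMT_bootstrapping_multi_thread -> NMT_started
//     -> NMT_shutdown_pending -> NMT_final_shutdown -> NMT_shutdown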
/**
 * Shut down native memory tracking.
 * We can not shut down native memory tracking immediately, so we just
 * set the shutdown-pending flag; every native memory tracking component
 * should then shut itself down in an orderly fashion.
 *
 * The shutdown sequence:
 * 1. MemTracker::shutdown() sets MemTracker to the shutdown-pending state.
 * 2. The worker thread calls MemTracker::final_shutdown(), which transitions
 *    MemTracker to the final shutdown state.
 * 3. At the sync point, MemTracker does final cleanup before setting the
 *    memory tracking level to off to complete the shutdown.
 */
void MemTracker::shutdown(ShutdownReason reason) {
  if (_tracking_level == NMT_off) return;

  if (_state <= NMT_bootstrapping_single_thread) {
    // we are still in single-threaded mode, so there is no contention
    _state = NMT_shutdown_pending;
    _reason = reason;
  } else {
    // we want to know who initiated the shutdown
    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
                                       (jint*)&_state, (jint)NMT_started)) {
      _reason = reason;
    }
  }
}
// final phase of shutdown
void MemTracker::final_shutdown() {
  // delete all pending recorders and pooled recorders
  delete_all_pending_recorders();
  delete_all_pooled_recorders();

  {
    // the shared baseline and snapshot are the only objects needed to
    // create query results
    MutexLockerEx locker(&_query_lock, true);
    // clean up baseline data and snapshot
    _baseline.clear();
    delete _snapshot;
    _snapshot = NULL;
  }

  // shut down the shared decoder instance, since it is only
  // used by native memory tracking so far.
  Decoder::shutdown();

  MemTrackWorker* worker = NULL;
  {
    ThreadCritical tc;
    // can not delete the worker inside ThreadCritical
    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
      worker = _worker_thread;
      _worker_thread = NULL;
    }
  }
  if (worker != NULL) {
    delete worker;
  }
  _state = NMT_final_shutdown;
}
// delete all pooled recorders
void MemTracker::delete_all_pooled_recorders() {
  // free all pooled recorders
  volatile MemRecorder* cur_head = _pooled_recorders;
  if (cur_head != NULL) {
    MemRecorder* null_ptr = NULL;
    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
        (void*)&_pooled_recorders, (void*)cur_head)) {
      cur_head = _pooled_recorders;
    }
    if (cur_head != NULL) {
      delete cur_head;
      _pooled_recorder_count = 0;
    }
  }
}
// delete all recorders in the pending queue
void MemTracker::delete_all_pending_recorders() {
  // free all pending recorders
  MemRecorder* pending_head = get_pending_recorders();
  if (pending_head != NULL) {
    delete pending_head;
  }
}
/*
 * Retrieve the per-thread recorder of the specified thread.
 * If thread == NULL, it means the global recorder.
 */
MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
  if (shutdown_in_progress()) return NULL;

  MemRecorder* rc;
  if (thread == NULL) {
    rc = _global_recorder;
  } else {
    rc = thread->get_recorder();
  }

  if (rc != NULL && rc->is_full()) {
    enqueue_pending_recorder(rc);
    rc = NULL;
  }

  if (rc == NULL) {
    rc = get_new_or_pooled_instance();
    if (thread == NULL) {
      _global_recorder = rc;
    } else {
      thread->set_recorder(rc);
    }
  }
  return rc;
}
/*
 * Get a per-thread recorder from the pool, or create a new one if
 * there is none available.
 */
MemRecorder* MemTracker::get_new_or_pooled_instance() {
  MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
  if (cur_head == NULL) {
    MemRecorder* rec = new (std::nothrow)MemRecorder();
    if (rec == NULL || rec->out_of_memory()) {
      shutdown(NMT_out_of_memory);
      if (rec != NULL) {
        delete rec;
        rec = NULL;
      }
    }
    return rec;
  } else {
    MemRecorder* next_head = cur_head->next();
    if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
        (void*)cur_head)) {
      return get_new_or_pooled_instance();
    }
    cur_head->set_next(NULL);
    Atomic::dec(&_pooled_recorder_count);
    debug_only(cur_head->set_generation();)
    return cur_head;
  }
}
/*
 * Retrieve all recorders in the pending queue, and empty the queue.
 */
MemRecorder* MemTracker::get_pending_recorders() {
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  MemRecorder* null_ptr = NULL;
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
      (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  }
  debug_only(Atomic::store(0, &_pending_recorder_count));
  return cur_head;
}
/*
 * Release a recorder to the recorder pool.
 */
void MemTracker::release_thread_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");
  // we don't want to pool too many recorders
  rec->set_next(NULL);
  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
    delete rec;
    return;
  }

  rec->clear();
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
      (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
    rec->set_next(cur_head);
  }
  Atomic::inc(&_pooled_recorder_count);
}
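// Implementation note: the recorder pool (_pooled_recorders) and the pending
// queue (_merge_pending_queue) are maintained as singly-linked, LIFO lists
// whose head pointers are only swapped via Atomic::cmpxchg_ptr. A push links
// the recorder to the observed head and retries the CAS until the head is
// unchanged; a pop (or a bulk take, as in get_pending_recorders()) swaps the
// head out the same way. This is why these paths need no VM mutex.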
/*
 * This is the most important method in the whole NMT implementation.
 *
 * Create a memory record.
 * 1. When NMT is in single-threaded bootstrapping mode, no lock is needed as
 *    the VM is still in single-threaded mode.
 * 2. For all threads other than JavaThreads, ThreadCritical is needed
 *    to write records to the global recorder.
 * 3. For JavaThreads that are no longer visible to safepoints, we also
 *    need to take ThreadCritical and write records to the global
 *    recorder, since these threads are NOT walked by Threads::threads_do().
 * 4. JavaThreads that are running in native state have to transition
 *    to VM state before writing to their per-thread recorders.
 * 5. JavaThreads that are running in VM state do not need any lock and
 *    records are written to per-thread recorders.
 * 6. Threads that have yet to attach a VM 'Thread' need to take
 *    ThreadCritical to write to the global recorder.
 *
 * Important note:
 *   NO LOCK should be taken inside the ThreadCritical lock !!!
 */
void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
    size_t size, address pc, Thread* thread) {
  if (!shutdown_in_progress()) {
    // single thread, we just write records directly to the global recorder,
    // without any lock
    if (_state == NMT_bootstrapping_single_thread) {
      assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
      thread = NULL;
    } else {
      if (thread == NULL) {
        // don't use Thread::current(), since it is possible that
        // the calling thread has yet to attach to a VM 'Thread',
        // which would result in an assertion failure
        thread = ThreadLocalStorage::thread();
      }
    }

    if (thread != NULL) {
      // for a JavaThread, if it is running in native state, we need to transition it to
      // VM state, so it can stop at a safepoint. A JavaThread running in VM state does not
      // need a lock to write records.
      if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
        if (((JavaThread*)thread)->thread_state() == _thread_in_native) {
          ThreadInVMfromNative trans((JavaThread*)thread);
          create_record_in_recorder(addr, flags, size, pc, thread);
        } else {
          create_record_in_recorder(addr, flags, size, pc, thread);
        }
      } else {
        // other threads, such as worker and watcher threads, need to
        // take ThreadCritical to write to the global recorder
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    } else {
      if (_state == NMT_bootstrapping_single_thread) {
        // single thread, no lock needed
        create_record_in_recorder(addr, flags, size, pc, NULL);
      } else {
        // for a thread that has yet to attach a VM 'Thread', we can not use a VM mutex;
        // use native ThreadCritical instead
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    }
  }
}
// write a record to the proper recorder. No lock can be taken from this method
// down.
void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags,
    size_t size, address pc, Thread* thread) {
  assert(thread == NULL || thread->is_Java_thread(), "wrong thread");

  MemRecorder* rc = get_thread_recorder((JavaThread*)thread);
  if (rc != NULL) {
    rc->record(addr, flags, size, pc);
  }
}
/**
 * enqueue a recorder to the pending queue
 */
void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");

  // we are shutting down, so just delete it
  if (shutdown_in_progress()) {
    rec->set_next(NULL);
    delete rec;
    return;
  }

  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
      (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
    rec->set_next(cur_head);
  }
  debug_only(Atomic::inc(&_pending_recorder_count);)
}
/*
 * This method is called at a global safepoint,
 * during its synchronization process.
 *   1. enqueue all JavaThreads' per-thread recorders
 *   2. enqueue the global recorder
 *   3. retrieve all pending recorders
 *   4. reset the global sequence number generator
 *   5. call the worker's sync
 */
#define MAX_SAFEPOINTS_TO_SKIP     128
#define SAFE_SEQUENCE_THRESHOLD    30
#define HIGH_GENERATION_THRESHOLD  60

void MemTracker::sync() {
  assert(_tracking_level > NMT_off, "NMT is not enabled");
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");

  // Some GC tests hit a large number of safepoints in a short period of time
  // without meaningful activity. We should avoid going to the sync point in
  // these cases, which can potentially exhaust the generation buffer.
  // Here are the factors that determine whether we should go into the sync point:
  // 1. not overflowing the sequence number
  // 2. whether we are in danger of overflowing the generation buffer
  // 3. how many safepoints have already skipped the sync point
  if (_state == NMT_started) {
    // worker thread is not ready, no one can manage the generation
    // buffer, so skip this safepoint
    if (_worker_thread == NULL) return;

    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
        _sync_point_skip_count ++;
        return;
      }
    }
    _sync_point_skip_count = 0;
    // walk all JavaThreads to collect recorders
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);

    _thread_count = stc.get_thread_count();
    MemRecorder* pending_recorders = get_pending_recorders();

    {
      // This method is running at a safepoint, with the ThreadCritical lock;
      // it should guarantee that NMT is fully sync-ed.
      ThreadCritical tc;
      if (_global_recorder != NULL) {
        _global_recorder->set_next(pending_recorders);
        pending_recorders = _global_recorder;
        _global_recorder = NULL;
      }
      SequenceGenerator::reset();
      // check _worker_thread with the lock held to avoid a race condition
      if (_worker_thread != NULL) {
        _worker_thread->at_sync_point(pending_recorders);
      }
    }
  }

  // now it is time to shut the whole thing off
  if (_state == NMT_final_shutdown) {
    _tracking_level = NMT_off;

    // walk all JavaThreads to delete all recorders
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);
    // delete the global recorder
    {
      ThreadCritical tc;
      if (_global_recorder != NULL) {
        delete _global_recorder;
        _global_recorder = NULL;
      }
    }

    _state = NMT_shutdown;
  }
}
/*
 * Start the worker thread.
 */
bool MemTracker::start_worker() {
  assert(_worker_thread == NULL, "Just Check");
  _worker_thread = new (std::nothrow) MemTrackWorker();
  if (_worker_thread == NULL || _worker_thread->has_error()) {
    shutdown(NMT_initialization);
    return false;
  }
  _worker_thread->start();
  return true;
}
/*
 * We need to collect a JavaThread's per-thread recorder
 * before it exits.
 */
void MemTracker::thread_exiting(JavaThread* thread) {
  if (is_on()) {
    MemRecorder* rec = thread->get_recorder();
    if (rec != NULL) {
      enqueue_pending_recorder(rec);
      thread->set_recorder(NULL);
    }
  }
}
// baseline the current memory snapshot
bool MemTracker::baseline() {
  MutexLockerEx lock(&_query_lock, true);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL) {
    return _baseline.baseline(*snapshot, false);
  }
  return false;
}
// print memory usage from the current snapshot
bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MemBaseline baseline;
  MutexLockerEx lock(&_query_lock, true);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
    BaselineReporter reporter(out, unit);
    reporter.report_baseline(baseline, summary_only);
    return true;
  }
  return false;
}
// compare memory usage between the current snapshot and the baseline
bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MutexLockerEx lock(&_query_lock, true);
  if (_baseline.baselined()) {
    MemBaseline baseline;
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
      BaselineReporter reporter(out, unit);
      reporter.diff_baselines(baseline, _baseline, summary_only);
      return true;
    }
  }
  return false;
}
#ifndef PRODUCT
void MemTracker::walk_stack(int toSkip, char* buf, int len) {
  int cur_len = 0;
  char tmp[1024];
  address pc;

  while (cur_len < len) {
    pc = os::get_caller_pc(toSkip + 1);
    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
      cur_len = (int)strlen(buf);
    } else {
      buf[cur_len] = '\0';
      break;
    }
    toSkip ++;
  }
}

void MemTracker::print_tracker_stats(outputStream* st) {
  st->print_cr("\nMemory Tracker Stats:");
  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
  st->print_cr("\tthread count = %d", _thread_count);
  st->print_cr("\tArena instance = %d", Arena::_instance_count);
  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
  if (_worker_thread != NULL) {
    st->print_cr("\tWorker thread:");
    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
  } else {
    st->print_cr("\tWorker thread is not started");
  }
  st->print_cr(" ");

  if (_snapshot != NULL) {
    _snapshot->print_snapshot_stats(st);
  } else {
    st->print_cr("No snapshot");
  }
}
#endif