/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"

#include "oops/instanceKlass.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memPtr.hpp"
#include "services/memReporter.hpp"
#include "services/memTracker.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/globalDefinitions.hpp"

bool NMT_track_callsite = false;

// walk all 'known' threads at NMT sync point, and collect their recorders
void SyncThreadRecorderClosure::do_thread(Thread* thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
  if (thread->is_Java_thread()) {
    JavaThread* javaThread = (JavaThread*)thread;
    MemRecorder* recorder = javaThread->get_recorder();
    if (recorder != NULL) {
      MemTracker::enqueue_pending_recorder(recorder);
      javaThread->set_recorder(NULL);
    }
  }
  _thread_count ++;
}


MemRecorder* volatile MemTracker::_global_recorder = NULL;
MemSnapshot*  MemTracker::_snapshot = NULL;
MemBaseline   MemTracker::_baseline;
Mutex*        MemTracker::_query_lock = NULL;
MemRecorder* volatile MemTracker::_merge_pending_queue = NULL;
MemRecorder* volatile MemTracker::_pooled_recorders = NULL;
MemTrackWorker* MemTracker::_worker_thread = NULL;
int           MemTracker::_sync_point_skip_count = 0;
MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off;
volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited;
MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
int           MemTracker::_thread_count = 255;
volatile jint MemTracker::_pooled_recorder_count = 0;
volatile unsigned long MemTracker::_processing_generation = 0;
volatile bool MemTracker::_worker_thread_idle = false;
volatile bool MemTracker::_slowdown_calling_thread = false;
debug_only(intx MemTracker::_main_thread_tid = 0;)
NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)

void MemTracker::init_tracking_options(const char* option_line) {
  _tracking_level = NMT_off;
  if (strcmp(option_line, "=summary") == 0) {
    _tracking_level = NMT_summary;
  } else if (strcmp(option_line, "=detail") == 0) {
    // detail relies on a stack-walking ability that may not
    // be available depending on platform and/or compiler flags
    if (PLATFORM_NMT_DETAIL_SUPPORTED) {
      _tracking_level = NMT_detail;
    } else {
      jio_fprintf(defaultStream::error_stream(),
        "NMT detail is not supported on this platform. Using NMT summary instead.");
      _tracking_level = NMT_summary;
    }
  } else if (strcmp(option_line, "=off") != 0) {
    vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
  }
}
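
/*
 * For reference, option_line is the tail of the -XX:NativeMemoryTracking
 * flag, including the leading '=':
 *
 *   java -XX:NativeMemoryTracking=summary ...   => option_line == "=summary"
 *   java -XX:NativeMemoryTracking=detail  ...   => option_line == "=detail"
 */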

// first phase of bootstrapping, when VM is still in single-threaded mode.
void MemTracker::bootstrap_single_thread() {
  if (_tracking_level > NMT_off) {
    assert(_state == NMT_uninited, "wrong state");

    // NMT is not supported when UseMallocOnly is on. NMT can NOT
    // handle the amount of malloc data without significantly impacting
    // runtime performance when this flag is on.
    if (UseMallocOnly) {
      shutdown(NMT_use_malloc_only);
      return;
    }

    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
    if (_query_lock == NULL) {
      shutdown(NMT_out_of_memory);
      return;
    }

    debug_only(_main_thread_tid = os::current_thread_id();)
    _state = NMT_bootstrapping_single_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}

// second phase of bootstrapping, when VM is about to or has already entered
// multi-threaded mode.
void MemTracker::bootstrap_multi_thread() {
  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
    // create nmt lock for multi-thread execution
    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
    _state = NMT_bootstrapping_multi_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}
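
/*
 * Normal-path state progression, as implemented by the transitions in this
 * file (shutdown() can also be entered from earlier states):
 *
 *   NMT_uninited
 *     --bootstrap_single_thread()--> NMT_bootstrapping_single_thread
 *     --bootstrap_multi_thread()---> NMT_bootstrapping_multi_thread
 *     --start()--------------------> NMT_started
 *     --shutdown()-----------------> NMT_shutdown_pending
 *     --final_shutdown()-----------> NMT_final_shutdown
 *     --sync()---------------------> NMT_shutdown
 */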

// fully start nmt
void MemTracker::start() {
  // Native memory tracking is off from command line option
  if (_tracking_level == NMT_off || shutdown_in_progress()) return;

  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");

  _snapshot = new (std::nothrow) MemSnapshot();
  if (_snapshot != NULL) {
    if (!_snapshot->out_of_memory() && start_worker(_snapshot)) {
      _state = NMT_started;
      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
      return;
    }

    delete _snapshot;
    _snapshot = NULL;
  }

  // failed to start native memory tracking; shut it down
  shutdown(NMT_initialization);
}

/**
 * Shutting down native memory tracking.
 * We cannot shut down native memory tracking immediately, so we just set
 * the shutdown-pending flag; every native memory tracking component should
 * then shut itself down in an orderly fashion.
 *
 * The shutdown sequence:
 *   1. MemTracker::shutdown() sets MemTracker to the shutdown-pending state.
 *   2. The worker thread calls MemTracker::final_shutdown(), which transitions
 *      MemTracker to the final shutdown state.
 *   3. At the next sync point, MemTracker does the final cleanup, before
 *      setting the memory tracking level to off to complete the shutdown.
 */
void MemTracker::shutdown(ShutdownReason reason) {
  if (_tracking_level == NMT_off) return;

  if (_state <= NMT_bootstrapping_single_thread) {
    // we are still in single-thread mode, so there is no contention
    _state = NMT_shutdown_pending;
    _reason = reason;
  } else {
    // we want to know who initiated the shutdown
    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
                                       (jint*)&_state, (jint)NMT_started)) {
      _reason = reason;
    }
  }
}
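
/*
 * Note on the cmpxchg above: Atomic::cmpxchg returns the value it observed
 * at the destination. Observing NMT_started means this thread won the race
 * to install NMT_shutdown_pending, so only this thread records the shutdown
 * reason. A minimal equivalent sketch:
 *
 *   jint observed = Atomic::cmpxchg((jint)NMT_shutdown_pending,
 *                                   (jint*)&_state, (jint)NMT_started);
 *   bool initiated_shutdown = (observed == (jint)NMT_started);
 */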

// final phase of shutdown
void MemTracker::final_shutdown() {
  // delete all pending recorders and pooled recorders
  delete_all_pending_recorders();
  delete_all_pooled_recorders();

  {
    // shared baseline and snapshot are the only objects needed to
    // create query results
    MutexLockerEx locker(_query_lock, true);
    // cleanup baseline data and snapshot
    _baseline.clear();
    delete _snapshot;
    _snapshot = NULL;
  }

  // shutdown the shared decoder instance, since it is only
  // used by native memory tracking so far.
  Decoder::shutdown();

  MemTrackWorker* worker = NULL;
  {
    ThreadCritical tc;
    // cannot delete the worker inside ThreadCritical
    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
      worker = _worker_thread;
      _worker_thread = NULL;
    }
  }
  if (worker != NULL) {
    delete worker;
  }
  _state = NMT_final_shutdown;
}

// delete all pooled recorders
void MemTracker::delete_all_pooled_recorders() {
  // free all pooled recorders
  MemRecorder* volatile cur_head = _pooled_recorders;
  if (cur_head != NULL) {
    MemRecorder* null_ptr = NULL;
    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
      (void*)&_pooled_recorders, (void*)cur_head)) {
      cur_head = _pooled_recorders;
    }
    if (cur_head != NULL) {
      delete cur_head;
      _pooled_recorder_count = 0;
    }
  }
}

// delete all recorders in the pending queue
void MemTracker::delete_all_pending_recorders() {
  // free all pending recorders
  MemRecorder* pending_head = get_pending_recorders();
  if (pending_head != NULL) {
    delete pending_head;
  }
}
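
/*
 * _pooled_recorders and _merge_pending_queue are both treated as lock-free
 * LIFO lists (Treiber stacks). A minimal sketch of the two idioms used
 * throughout this file, where "head" stands for either list head:
 *
 *   // push "rec" onto the list
 *   MemRecorder* cur_head;
 *   do {
 *     cur_head = head;
 *     rec->set_next(cur_head);
 *   } while (Atomic::cmpxchg_ptr(rec, &head, cur_head) != cur_head);
 *
 *   // detach the entire list at once (pop-all)
 *   MemRecorder* cur_head;
 *   do {
 *     cur_head = head;
 *   } while (cur_head != NULL &&
 *            Atomic::cmpxchg_ptr(NULL, &head, cur_head) != cur_head);
 *   // cur_head now owns the detached chain
 */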

/*
 * Retrieve the per-thread recorder of the specified thread.
 * If thread == NULL, it means the global recorder.
 */
MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
  if (shutdown_in_progress()) return NULL;

  MemRecorder* rc;
  if (thread == NULL) {
    rc = _global_recorder;
  } else {
    rc = thread->get_recorder();
  }

  if (rc != NULL && rc->is_full()) {
    enqueue_pending_recorder(rc);
    rc = NULL;
  }

  if (rc == NULL) {
    rc = get_new_or_pooled_instance();
    if (thread == NULL) {
      _global_recorder = rc;
    } else {
      thread->set_recorder(rc);
    }
  }
  return rc;
}

/*
 * Get a recorder from the pool, or create a new one if none is available.
 */
MemRecorder* MemTracker::get_new_or_pooled_instance() {
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  if (cur_head == NULL) {
    MemRecorder* rec = new (std::nothrow) MemRecorder();
    if (rec == NULL || rec->out_of_memory()) {
      shutdown(NMT_out_of_memory);
      if (rec != NULL) {
        delete rec;
        rec = NULL;
      }
    }
    return rec;
  } else {
    MemRecorder* next_head = cur_head->next();
    if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
      (void*)cur_head)) {
      return get_new_or_pooled_instance();
    }
    cur_head->set_next(NULL);
    Atomic::dec(&_pooled_recorder_count);
    cur_head->set_generation();
    return cur_head;
  }
}

/*
 * Retrieve all recorders in the pending queue, and empty the queue.
 */
MemRecorder* MemTracker::get_pending_recorders() {
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  MemRecorder* null_ptr = NULL;
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  }
  NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
  return cur_head;
}

/*
 * Release a recorder to the recorder pool.
 */
void MemTracker::release_thread_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");
  // we don't want to pool too many recorders
  rec->set_next(NULL);
  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
    delete rec;
    return;
  }

  rec->clear();
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
    rec->set_next(cur_head);
  }
  Atomic::inc(&_pooled_recorder_count);
}
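
/*
 * Putting the functions above together, each recorder cycles through the
 * following states (the merge step itself lives in MemTrackWorker, which is
 * expected to return drained recorders via release_thread_recorder()):
 *
 *   get_new_or_pooled_instance()
 *          |
 *          v
 *   per-thread or global recorder --(full)--> enqueue_pending_recorder()
 *          ^                                        |
 *          |                           worker thread merges the records
 *          |                                        v
 *        pool <-------------------- release_thread_recorder()
 */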

/*
 * This is the most important method in the whole NMT implementation.
 *
 * Create a memory record.
 * 1. When NMT is in single-threaded bootstrapping mode, no lock is needed as
 *    the VM is still in single-thread mode.
 * 2. For all threads other than JavaThreads, ThreadCritical is needed
 *    to write records to the global recorder.
 * 3. For JavaThreads that are no longer visible to safepoints, ThreadCritical
 *    is also needed, and records are written to the global recorder, since
 *    these threads are NOT walked by Threads::threads_do().
 * 4. JavaThreads that are running in native state have to transition to VM
 *    state before writing to per-thread recorders.
 * 5. JavaThreads that are running in VM state do not need any lock, and
 *    records are written to per-thread recorders.
 * 6. Threads that have yet to attach a VM 'Thread' need to take
 *    ThreadCritical to write to the global recorder.
 *
 * Important note:
 * NO LOCK should be taken inside the ThreadCritical lock !!!
 */
void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
    size_t size, address pc, Thread* thread) {
  assert(addr != NULL, "Sanity check");
  if (!shutdown_in_progress()) {
    // single thread, we just write records directly to the global recorder,
    // without any lock
    if (_state == NMT_bootstrapping_single_thread) {
      assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
      thread = NULL;
    } else {
      if (thread == NULL) {
        // don't use Thread::current(), since it is possible that
        // the calling thread has yet to attach a VM 'Thread',
        // which would result in an assertion failure
        thread = ThreadLocalStorage::thread();
      }
    }

    if (thread != NULL) {
      // slow down all calling threads except the NMT worker thread, so
      // the worker can catch up.
      if (_slowdown_calling_thread && thread != _worker_thread) {
        os::yield_all();
      }

      if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
        JavaThread* java_thread = (JavaThread*)thread;
        JavaThreadState state = java_thread->thread_state();
        if (SafepointSynchronize::safepoint_safe(java_thread, state)) {
          // JavaThreads that are safepoint safe can run through a safepoint,
          // so ThreadCritical is needed to ensure no thread at a safepoint
          // creates new records while the records are being gathered and the
          // sequence number is changing
          ThreadCritical tc;
          create_record_in_recorder(addr, flags, size, pc, java_thread);
        } else {
          create_record_in_recorder(addr, flags, size, pc, java_thread);
        }
      } else {
        // other threads, such as worker and watcher threads, etc. need to
        // take ThreadCritical to write to the global recorder
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    } else {
      if (_state == NMT_bootstrapping_single_thread) {
        // single thread, no lock needed
        create_record_in_recorder(addr, flags, size, pc, NULL);
      } else {
        // for a thread that has yet to attach a VM 'Thread', we cannot use a
        // VM mutex; use native ThreadCritical instead
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    }
  }
}
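
/*
 * Callers normally reach create_memory_record() through the inline tracking
 * wrappers declared in memTracker.hpp (e.g. record_malloc / record_free). A
 * hypothetical direct call, for illustration only:
 *
 *   MemTracker::create_memory_record(addr, flags, size,
 *                                    CALLER_PC, Thread::current());
 *
 * Note that Thread::current() is only safe if the caller has already
 * attached; otherwise pass NULL and let the code above consult
 * ThreadLocalStorage::thread().
 */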

// write a record to the proper recorder. No lock can be taken from this
// method down.
void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags,
    size_t size, address pc, JavaThread* thread) {

  MemRecorder* rc = get_thread_recorder(thread);
  if (rc != NULL) {
    rc->record(addr, flags, size, pc);
  }
}

/**
 * Enqueue a recorder to the pending queue.
 */
void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");

  // we are shutting down, so just delete it
  if (shutdown_in_progress()) {
    rec->set_next(NULL);
    delete rec;
    return;
  }

  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
    rec->set_next(cur_head);
  }
  NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
}

/*
 * This method is called at a global safepoint, during its synchronization
 * process.
 *   1. enqueue all JavaThreads' per-thread recorders
 *   2. enqueue the global recorder
 *   3. retrieve all pending recorders
 *   4. reset the global sequence number generator
 *   5. call the worker's sync
 */
#define MAX_SAFEPOINTS_TO_SKIP     128
#define SAFE_SEQUENCE_THRESHOLD    30
#define HIGH_GENERATION_THRESHOLD  60
#define MAX_RECORDER_THREAD_RATIO  30
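
/*
 * Worked example of the skip heuristic in sync() below: with
 * SAFE_SEQUENCE_THRESHOLD = 30 and HIGH_GENERATION_THRESHOLD = 60, a sync
 * point is skipped (at most MAX_SAFEPOINTS_TO_SKIP = 128 times in a row)
 * when less than 30% of the sequence number space has been used but 60% or
 * more of the generation buffers are still in flight; in that situation the
 * worker thread is behind, and entering the sync point would only consume
 * another generation.
 */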

void MemTracker::sync() {
  assert(_tracking_level > NMT_off, "NMT is not enabled");
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");

  // Some GC tests hit a large number of safepoints in a short period of time
  // without meaningful activity. We should avoid going to the sync point in
  // these cases, which can potentially exhaust the generation buffer.
  // The factors that determine whether we should go into the sync point are:
  //   1. not overflowing the sequence number
  //   2. whether we are in danger of overflowing the generation buffer
  //   3. how many safepoints have already skipped the sync point
  if (_state == NMT_started) {
    // the worker thread is not ready; no one can manage the generation
    // buffer, so skip this safepoint
    if (_worker_thread == NULL) return;

    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
        _sync_point_skip_count ++;
        return;
      }
    }
    _sync_point_skip_count = 0;
    {
      // This method is running at a safepoint, with the ThreadCritical lock,
      // so it should guarantee that NMT is fully sync-ed.
      ThreadCritical tc;

      SequenceGenerator::reset();

      // walk all JavaThreads to collect recorders
      SyncThreadRecorderClosure stc;
      Threads::threads_do(&stc);

      _thread_count = stc.get_thread_count();
      MemRecorder* pending_recorders = get_pending_recorders();

      if (_global_recorder != NULL) {
        _global_recorder->set_next(pending_recorders);
        pending_recorders = _global_recorder;
        _global_recorder = NULL;
      }

      // see if NMT has too many outstanding recorder instances; that usually
      // means the worker thread is lagging behind in processing them.
      if (!AutoShutdownNMT) {
        _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
      }

      // check _worker_thread with the lock held to avoid a race condition
      if (_worker_thread != NULL) {
        _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
      }

      assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
    }
  }

  // now it is time to shut the whole thing off
  if (_state == NMT_final_shutdown) {
    // walk all JavaThreads to delete all recorders
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);
    // delete the global recorder
    {
      ThreadCritical tc;
      if (_global_recorder != NULL) {
        delete _global_recorder;
        _global_recorder = NULL;
      }
    }
    MemRecorder* pending_recorders = get_pending_recorders();
    if (pending_recorders != NULL) {
      delete pending_recorders;
    }
    // retry at a later sync point: NMT only shuts down completely once the
    // MemRecorder instance count has dropped to zero
    if (MemRecorder::_instance_count == 0) {
      _state = NMT_shutdown;
      _tracking_level = NMT_off;
    }
  }
}
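
/*
 * Example of the slowdown trigger in sync() above: with
 * MAX_RECORDER_THREAD_RATIO = 30 and _thread_count = 100, the flag flips on
 * once more than 3000 MemRecorder instances are outstanding; calling threads
 * then start yielding in create_memory_record() until the worker thread
 * catches up.
 */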

/*
 * Start the worker thread.
 */
bool MemTracker::start_worker(MemSnapshot* snapshot) {
  assert(_worker_thread == NULL && _snapshot != NULL, "Just Check");
  _worker_thread = new (std::nothrow) MemTrackWorker(snapshot);
  if (_worker_thread == NULL) {
    return false;
  } else if (_worker_thread->has_error()) {
    delete _worker_thread;
    _worker_thread = NULL;
    return false;
  }
  _worker_thread->start();
  return true;
}

/*
 * We need to collect a JavaThread's per-thread recorder
 * before it exits.
 */
void MemTracker::thread_exiting(JavaThread* thread) {
  if (is_on()) {
    MemRecorder* rec = thread->get_recorder();
    if (rec != NULL) {
      enqueue_pending_recorder(rec);
      thread->set_recorder(NULL);
    }
  }
}

// baseline the current memory snapshot
bool MemTracker::baseline() {
  MutexLocker lock(_query_lock);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL) {
    return _baseline.baseline(*snapshot, false);
  }
  return false;
}

// print memory usage from the current snapshot
bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MemBaseline baseline;
  MutexLocker lock(_query_lock);
  MemSnapshot* snapshot = get_snapshot();
  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
    BaselineReporter reporter(out, unit);
    reporter.report_baseline(baseline, summary_only);
    return true;
  }
  return false;
}
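
/*
 * baseline() and print_memory_usage() above, together with
 * compare_memory_usage() below, back the VM.native_memory diagnostic
 * command (see services/nmtDCmd.cpp), e.g.:
 *
 *   jcmd <pid> VM.native_memory summary
 *   jcmd <pid> VM.native_memory baseline
 *   jcmd <pid> VM.native_memory summary.diff
 */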

// Whitebox API for blocking until the current generation of NMT data has been merged
bool MemTracker::wbtest_wait_for_data_merge() {
  // NMT can't be shut down while we're holding _query_lock
  MutexLocker lock(_query_lock);
  assert(_worker_thread != NULL, "Invalid query");
  // the generation at query time; NMT will spin until this generation is processed
  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
  unsigned long current_processing_generation = _processing_generation;
  // whether the generation counter has overflowed
  bool generation_overflown = (generation_at_query_time < current_processing_generation);
  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
  // spin
  while (!shutdown_in_progress()) {
    if (!generation_overflown) {
      if (current_processing_generation > generation_at_query_time) {
        return true;
      }
    } else {
      assert(generations_to_wrap >= 0, "Sanity check");
      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
      assert(current_generations_to_wrap >= 0, "Sanity check");
      // overflowing an unsigned long should take a long time, so the to_wrap
      // check should be sufficient
      if (current_generations_to_wrap > generations_to_wrap &&
          current_processing_generation > generation_at_query_time) {
        return true;
      }
    }

    // if the worker thread is idle but the generation is not advancing, there
    // is no safepoint to let NMT advance the generation; force one.
    if (_worker_thread_idle) {
      VM_ForceSafepoint vfs;
      VMThread::execute(&vfs);
    }
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot == NULL) {
      return false;
    }
    snapshot->wait(1000);
    current_processing_generation = _processing_generation;
  }
  // We end up here if NMT is shutting down before our data has been merged
  return false;
}

// compare memory usage between the current snapshot and the baseline
bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
  MutexLocker lock(_query_lock);
  if (_baseline.baselined()) {
    MemBaseline baseline;
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
      BaselineReporter reporter(out, unit);
      reporter.diff_baselines(baseline, _baseline, summary_only);
      return true;
    }
  }
  return false;
}

#ifndef PRODUCT
void MemTracker::walk_stack(int toSkip, char* buf, int len) {
  int cur_len = 0;
  char tmp[1024];
  address pc;

  while (cur_len < len) {
    pc = os::get_caller_pc(toSkip + 1);
    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
      cur_len = (int)strlen(buf);
    } else {
      buf[cur_len] = '\0';
      break;
    }
    toSkip ++;
  }
}

void MemTracker::print_tracker_stats(outputStream* st) {
  st->print_cr("\nMemory Tracker Stats:");
  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
  st->print_cr("\tthread count = %d", _thread_count);
  st->print_cr("\tArena instance = %d", Arena::_instance_count);
  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
  if (_worker_thread != NULL) {
    st->print_cr("\tWorker thread:");
    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
  } else {
    st->print_cr("\tWorker thread is not started");
  }
  st->print_cr(" ");

  if (_snapshot != NULL) {
    _snapshot->print_snapshot_stats(st);
  } else {
    st->print_cr("No snapshot");
  }
}
#endif