src/share/vm/services/memTracker.cpp

changeset 3900
d2a62e0f25eb
child 3935
7e5976e66c62
child 3936
f1f45dddb0bd
child 3938
04a9b3789683
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/services/memTracker.cpp	Thu Jun 28 17:03:16 2012 -0400
     1.3 @@ -0,0 +1,617 @@
     1.4 +/*
     1.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +#include "precompiled.hpp"
    1.28 +
    1.29 +#include "runtime/atomic.hpp"
    1.30 +#include "runtime/interfaceSupport.hpp"
    1.31 +#include "runtime/mutexLocker.hpp"
    1.32 +#include "runtime/safepoint.hpp"
    1.33 +#include "runtime/threadCritical.hpp"
    1.34 +#include "services/memPtr.hpp"
    1.35 +#include "services/memReporter.hpp"
    1.36 +#include "services/memTracker.hpp"
    1.37 +#include "utilities/decoder.hpp"
    1.38 +#include "utilities/globalDefinitions.hpp"
    1.39 +
// True when NMT records malloc/mmap call sites; set during bootstrap when
// the tracking level is NMT_detail and the platform can walk the stack.
bool NMT_track_callsite = false;
    1.41 +
    1.42 +// walk all 'known' threads at NMT sync point, and collect their recorders
    1.43 +void SyncThreadRecorderClosure::do_thread(Thread* thread) {
    1.44 +  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
    1.45 +  if (thread->is_Java_thread()) {
    1.46 +    JavaThread* javaThread = (JavaThread*)thread;
    1.47 +    MemRecorder* recorder = javaThread->get_recorder();
    1.48 +    if (recorder != NULL) {
    1.49 +      MemTracker::enqueue_pending_recorder(recorder);
    1.50 +      javaThread->set_recorder(NULL);
    1.51 +    }
    1.52 +  }
    1.53 +  _thread_count ++;
    1.54 +}
    1.55 +
    1.56 +
// Definitions of MemTracker's static state.
MemRecorder*                    MemTracker::_global_recorder = NULL;      // recorder shared by threads without a per-thread one
MemSnapshot*                    MemTracker::_snapshot = NULL;             // merged snapshot used to answer queries
MemBaseline                     MemTracker::_baseline;                    // baseline used for usage comparison
Mutex                           MemTracker::_query_lock(Monitor::native, "NMT_queryLock");  // guards _baseline and _snapshot
volatile MemRecorder*           MemTracker::_merge_pending_queue = NULL;  // lock-free stack of recorders awaiting merge
volatile MemRecorder*           MemTracker::_pooled_recorders = NULL;     // lock-free stack of reusable recorders
MemTrackWorker*                 MemTracker::_worker_thread = NULL;        // background thread that merges recorders
int                             MemTracker::_sync_point_skip_count = 0;   // consecutive safepoints where sync was skipped
MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
int                             MemTracker::_thread_count = 255;          // initial guess; refreshed at each sync point
volatile jint                   MemTracker::_pooled_recorder_count = 0;
debug_only(intx                 MemTracker::_main_thread_tid = 0;)
debug_only(volatile jint        MemTracker::_pending_recorder_count = 0;)
    1.72 +
    1.73 +void MemTracker::init_tracking_options(const char* option_line) {
    1.74 +  _tracking_level = NMT_off;
    1.75 +  if (strncmp(option_line, "=summary", 8) == 0) {
    1.76 +    _tracking_level = NMT_summary;
    1.77 +  } else if (strncmp(option_line, "=detail", 8) == 0) {
    1.78 +    _tracking_level = NMT_detail;
    1.79 +  }
    1.80 +}
    1.81 +
    1.82 +// first phase of bootstrapping, when VM is still in single-threaded mode.
    1.83 +void MemTracker::bootstrap_single_thread() {
    1.84 +  if (_tracking_level > NMT_off) {
    1.85 +    assert(_state == NMT_uninited, "wrong state");
    1.86 +
    1.87 +    // NMT is not supported with UseMallocOnly is on. NMT can NOT
    1.88 +    // handle the amount of malloc data without significantly impacting
    1.89 +    // runtime performance when this flag is on.
    1.90 +    if (UseMallocOnly) {
    1.91 +      shutdown(NMT_use_malloc_only);
    1.92 +      return;
    1.93 +    }
    1.94 +
    1.95 +    debug_only(_main_thread_tid = os::current_thread_id();)
    1.96 +    _state = NMT_bootstrapping_single_thread;
    1.97 +    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
    1.98 +  }
    1.99 +}
   1.100 +
   1.101 +// second phase of bootstrapping, when VM is about to or already entered multi-theaded mode.
   1.102 +void MemTracker::bootstrap_multi_thread() {
   1.103 +  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
   1.104 +  // create nmt lock for multi-thread execution
   1.105 +    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
   1.106 +    _state = NMT_bootstrapping_multi_thread;
   1.107 +    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
   1.108 +  }
   1.109 +}
   1.110 +
   1.111 +// fully start nmt
   1.112 +void MemTracker::start() {
   1.113 +  // Native memory tracking is off from command line option
   1.114 +  if (_tracking_level == NMT_off || shutdown_in_progress()) return;
   1.115 +
   1.116 +  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
   1.117 +  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");
   1.118 +
   1.119 +  _snapshot = new (std::nothrow)MemSnapshot();
   1.120 +  if (_snapshot != NULL && !_snapshot->out_of_memory()) {
   1.121 +    if (start_worker()) {
   1.122 +      _state = NMT_started;
   1.123 +      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
   1.124 +      return;
   1.125 +    }
   1.126 +  }
   1.127 +
   1.128 +  // fail to start native memory tracking, shut it down
   1.129 +  shutdown(NMT_initialization);
   1.130 +}
   1.131 +
   1.132 +/**
   1.133 + * Shutting down native memory tracking.
   1.134 + * We can not shutdown native memory tracking immediately, so we just
   1.135 + * setup shutdown pending flag, every native memory tracking component
   1.136 + * should orderly shut itself down.
   1.137 + *
   1.138 + * The shutdown sequences:
   1.139 + *  1. MemTracker::shutdown() sets MemTracker to shutdown pending state
   1.140 + *  2. Worker thread calls MemTracker::final_shutdown(), which transites
   1.141 + *     MemTracker to final shutdown state.
   1.142 + *  3. At sync point, MemTracker does final cleanup, before sets memory
   1.143 + *     tracking level to off to complete shutdown.
   1.144 + */
   1.145 +void MemTracker::shutdown(ShutdownReason reason) {
   1.146 +  if (_tracking_level == NMT_off) return;
   1.147 +
   1.148 +  if (_state <= NMT_bootstrapping_single_thread) {
   1.149 +    // we still in single thread mode, there is not contention
   1.150 +    _state = NMT_shutdown_pending;
   1.151 +    _reason = reason;
   1.152 +  } else {
   1.153 +    // we want to know who initialized shutdown
   1.154 +    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
   1.155 +                                       (jint*)&_state, (jint)NMT_started)) {
   1.156 +        _reason = reason;
   1.157 +    }
   1.158 +  }
   1.159 +}
   1.160 +
// Final phase of shutdown. Frees the recorder queues, the query-side data
// (baseline and snapshot) and the shared decoder, then detaches the worker
// thread object so it can be deleted outside the critical section. The
// remaining cleanup happens at the next sync point (see sync()).
void MemTracker::final_shutdown() {
  // delete all pending recorders and pooled recorders
  delete_all_pending_recorders();
  delete_all_pooled_recorders();

  {
    // shared baseline and snapshot are the only objects needed to
    // create query results, so they are released under the query lock
    MutexLockerEx locker(&_query_lock, true);
    // cleanup baseline data and snapshot
    _baseline.clear();
    delete _snapshot;
    _snapshot = NULL;
  }

  // shutdown shared decoder instance, since it is only
  // used by native memory tracking so far.
  Decoder::shutdown();

  MemTrackWorker* worker = NULL;
  {
    ThreadCritical tc;
    // can not delete worker inside the thread critical section; detach it
    // under the lock and free it after the section is left. Only the worker
    // thread itself performs this detach (Thread::current() check below).
    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
      worker = _worker_thread;
      _worker_thread = NULL;
    }
  }
  if (worker != NULL) {
    delete worker;
  }
  _state = NMT_final_shutdown;
}
   1.195 +
// Detach the whole pooled-recorder stack with a CAS and free it.
void MemTracker::delete_all_pooled_recorders() {
  // free all pooled recorders
  volatile MemRecorder* cur_head = _pooled_recorders;
  if (cur_head != NULL) {
    MemRecorder* null_ptr = NULL;
    // swing the pool head to NULL; retry if another thread changed the
    // head between our read and the compare-and-exchange
    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
      (void*)&_pooled_recorders, (void*)cur_head)) {
      cur_head = _pooled_recorders;
    }
    if (cur_head != NULL) {
      // deleting the head releases the whole chain — presumably via
      // MemRecorder's destructor following the next pointers; TODO confirm
      delete cur_head;
      _pooled_recorder_count = 0;
    }
  }
}
   1.212 +
   1.213 +// delete all recorders in pending queue
   1.214 +void MemTracker::delete_all_pending_recorders() {
   1.215 +  // free all pending recorders
   1.216 +  MemRecorder* pending_head = get_pending_recorders();
   1.217 +  if (pending_head != NULL) {
   1.218 +    delete pending_head;
   1.219 +  }
   1.220 +}
   1.221 +
/*
 * retrieve the per-thread recorder of the specified thread.
 * if thread == NULL, it means the global recorder (used by threads that
 * have no per-thread recorder; see create_memory_record() for the
 * ThreadCritical serialization around it).
 *
 * A full recorder is handed to the pending-merge queue and replaced with
 * a fresh (new or pooled) instance. Returns NULL when shutdown is in
 * progress or a replacement cannot be obtained.
 */
MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
  if (shutdown_in_progress()) return NULL;

  MemRecorder* rc;
  if (thread == NULL) {
    rc = _global_recorder;
  } else {
    rc = thread->get_recorder();
  }

  // a full recorder is queued for the worker thread to merge
  if (rc != NULL && rc->is_full()) {
    enqueue_pending_recorder(rc);
    rc = NULL;
  }

  if (rc == NULL) {
    // may return NULL on out-of-memory, in which case the NULL is also
    // stored back as the current recorder
    rc = get_new_or_pooled_instance();
    if (thread == NULL) {
      _global_recorder = rc;
    } else {
      thread->set_recorder(rc);
    }
  }
  return rc;
}
   1.251 +
/*
 * get a recorder from the pool, or create a new one if there is none
 * available. Returns NULL (after initiating shutdown) when a new
 * recorder cannot be allocated.
 */
MemRecorder* MemTracker::get_new_or_pooled_instance() {
   MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
   if (cur_head == NULL) {
     // pool is empty: allocate a fresh recorder
     MemRecorder* rec = new (std::nothrow)MemRecorder();
     if (rec == NULL || rec->out_of_memory()) {
       // allocation failure is fatal for tracking as a whole
       shutdown(NMT_out_of_memory);
       if (rec != NULL) {
         delete rec;
         rec = NULL;
       }
     }
     return rec;
   } else {
     // pop the head of the lock-free pool stack; if the CAS loses a race,
     // simply retry the whole operation via tail recursion
     MemRecorder* next_head = cur_head->next();
     if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
       (void*)cur_head)) {
       return get_new_or_pooled_instance();
     }
     cur_head->set_next(NULL);
     Atomic::dec(&_pooled_recorder_count);
     debug_only(cur_head->set_generation();)
     return cur_head;
  }
}
   1.280 +
/*
 * retrieve all recorders in the pending queue, and empty the queue.
 * Returns the head of the detached chain (may be NULL).
 */
MemRecorder* MemTracker::get_pending_recorders() {
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  MemRecorder* null_ptr = NULL;
  // swing the queue head to NULL; re-read and retry until the CAS wins
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  }
  debug_only(Atomic::store(0, &_pending_recorder_count));
  return cur_head;
}
   1.294 +
/*
 * release a recorder to the recorder pool. The recorder is deleted
 * instead of pooled when shutdown is in progress or the pool is already
 * large relative to the known thread count.
 */
void MemTracker::release_thread_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");
  // unlink first so deleting this recorder cannot affect others
  rec->set_next(NULL);
  // we don't want to pool too many recorders
  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
    delete rec;
    return;
  }

  rec->clear();
  // push onto the lock-free pool stack; re-read the head and re-link
  // until the CAS succeeds
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
    rec->set_next(cur_head);
  }
  Atomic::inc(&_pooled_recorder_count);
}
   1.317 +
/*
 * This is the most important method in the whole nmt implementation.
 *
 * Create a memory record.
 * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as
 *    the VM is still in single-thread mode.
 * 2. For all threads other than JavaThread, ThreadCritical is needed
 *    to write records to the global recorder.
 * 3. For JavaThreads that are no longer visible by safepoint, we also
 *    need to take ThreadCritical and records are written to the global
 *    recorder, since these threads are NOT walked by Threads.do_thread().
 * 4. JavaThreads that are running in native state have to transition
 *    to VM state before writing to per-thread recorders.
 * 5. JavaThreads that are running in VM state do not need any lock and
 *    records are written to per-thread recorders.
 * 6. For a thread that has yet to attach a VM 'Thread', ThreadCritical
 *    is needed to write to the global recorder.
 *
 *    Important note:
 *    NO LOCK should be taken inside ThreadCritical lock !!!
 */
void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
    size_t size, address pc, Thread* thread) {
  if (!shutdown_in_progress()) {
    // single thread: we just write records directly to the global recorder,
    // without any lock
    if (_state == NMT_bootstrapping_single_thread) {
      assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
      thread = NULL;
    } else {
      if (thread == NULL) {
          // don't use Thread::current(), since it is possible that
          // the calling thread has yet to attach to a VM 'Thread',
          // which would result in an assertion failure
          thread = ThreadLocalStorage::thread();
      }
    }

    if (thread != NULL) {
#ifdef ASSERT
      // cause assertion on stack base. This ensures that threads call
      // Thread::record_stack_base_and_size() method, which will create
      // thread native stack records.
      thread->stack_base();
#endif
      // for a JavaThread, if it is running in native state, we need to transition it to
      // VM state, so it can stop at safepoint. JavaThread running in VM state does not
      // need lock to write records.
      if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
        if (((JavaThread*)thread)->thread_state() == _thread_in_native) {
          // RAII transition object: in VM state for the duration of the write
          ThreadInVMfromNative trans((JavaThread*)thread);
          create_record_in_recorder(addr, flags, size, pc, thread);
        } else {
          create_record_in_recorder(addr, flags, size, pc, thread);
        }
      } else {
        // other threads, such as worker and watcher threads, etc. need to
        // take ThreadCritical to write to global recorder
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    } else {
      if (_state == NMT_bootstrapping_single_thread) {
        // single thread, no lock needed
        create_record_in_recorder(addr, flags, size, pc, NULL);
      } else {
        // for a thread that has yet to attach a VM 'Thread', we can not use
        // a VM mutex; use native thread critical instead
        ThreadCritical tc;
        create_record_in_recorder(addr, flags, size, pc, NULL);
      }
    }
  }
}
   1.392 +
   1.393 +// write a record to proper recorder. No lock can be taken from this method
   1.394 +// down.
   1.395 +void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags,
   1.396 +    size_t size, address pc, Thread* thread) {
   1.397 +    assert(thread == NULL || thread->is_Java_thread(), "wrong thread");
   1.398 +
   1.399 +    MemRecorder* rc = get_thread_recorder((JavaThread*)thread);
   1.400 +    if (rc != NULL) {
   1.401 +      rc->record(addr, flags, size, pc);
   1.402 +    }
   1.403 +}
   1.404 +
/**
 * enqueue a recorder to the pending queue (lock-free stack), from which
 * the worker thread collects recorders at each sync point.
 */
void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");

  // we are shutting down, so just delete it
  if (shutdown_in_progress()) {
    // unlink first so only this recorder is freed
    rec->set_next(NULL);
    delete rec;
    return;
  }

  // push onto the queue; re-read the head and re-link until the CAS wins
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
    rec->set_next(cur_head);
  }
  debug_only(Atomic::inc(&_pending_recorder_count);)
}
   1.427 +
/*
 * This method is called at a global safepoint during its
 * synchronization process.
 *   1. enqueue all JavaThreads' per-thread recorders
 *   2. enqueue the global recorder
 *   3. retrieve all pending recorders
 *   4. reset the global sequence number generator
 *   5. call the worker's sync
 */
#define MAX_SAFEPOINTS_TO_SKIP     128
#define SAFE_SEQUENCE_THRESHOLD    30
#define HIGH_GENERATION_THRESHOLD  60

void MemTracker::sync() {
  assert(_tracking_level > NMT_off, "NMT is not enabled");
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");

  // Some GC tests hit a large number of safepoints in a short period of time
  // without meaningful activity. We should prevent going to the
  // sync point in these cases, which can potentially exhaust the generation buffer.
  // Here are the factors that determine if we should go into the sync point:
  // 1. not to overflow the sequence number
  // 2. if we are in danger of overflowing the generation buffer
  // 3. how many safepoints we have already skipped the sync point
  if (_state == NMT_started) {
    // worker thread is not ready, no one can manage the generation
    // buffer, so skip this safepoint
    if (_worker_thread == NULL) return;

    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
      // NOTE(review): this skips the sync point when sequence usage is low
      // but generation usage is already high — confirm this is the
      // intended relationship between the two thresholds
      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
        _sync_point_skip_count ++;
        return;
      }
    }
    _sync_point_skip_count = 0;
    // walk all JavaThreads to collect recorders
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);

    _thread_count = stc.get_thread_count();
    MemRecorder* pending_recorders = get_pending_recorders();

    {
      // This method is running at a safepoint, with ThreadCritical lock,
      // it should guarantee that NMT is fully sync-ed.
      ThreadCritical tc;
      // chain the global recorder in front of the pending list
      if (_global_recorder != NULL) {
        _global_recorder->set_next(pending_recorders);
        pending_recorders = _global_recorder;
        _global_recorder = NULL;
      }
      SequenceGenerator::reset();
      // check _worker_thread with lock to avoid racing condition
      if (_worker_thread != NULL) {
        _worker_thread->at_sync_point(pending_recorders);
      }
    }
  }

  // now, it is the time to shut the whole thing off
  if (_state == NMT_final_shutdown) {
    _tracking_level = NMT_off;

    // walk all JavaThreads to delete all recorders
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);
    // delete global recorder
    {
      ThreadCritical tc;
      if (_global_recorder != NULL) {
        delete _global_recorder;
        _global_recorder = NULL;
      }
    }

    _state = NMT_shutdown;
  }
}
   1.509 +
   1.510 +/*
   1.511 + * Start worker thread.
   1.512 + */
   1.513 +bool MemTracker::start_worker() {
   1.514 +  assert(_worker_thread == NULL, "Just Check");
   1.515 +  _worker_thread = new (std::nothrow) MemTrackWorker();
   1.516 +  if (_worker_thread == NULL || _worker_thread->has_error()) {
   1.517 +    shutdown(NMT_initialization);
   1.518 +    return false;
   1.519 +  }
   1.520 +  _worker_thread->start();
   1.521 +  return true;
   1.522 +}
   1.523 +
   1.524 +/*
   1.525 + * We need to collect a JavaThread's per-thread recorder
   1.526 + * before it exits.
   1.527 + */
   1.528 +void MemTracker::thread_exiting(JavaThread* thread) {
   1.529 +  if (is_on()) {
   1.530 +    MemRecorder* rec = thread->get_recorder();
   1.531 +    if (rec != NULL) {
   1.532 +      enqueue_pending_recorder(rec);
   1.533 +      thread->set_recorder(NULL);
   1.534 +    }
   1.535 +  }
   1.536 +}
   1.537 +
   1.538 +// baseline current memory snapshot
   1.539 +bool MemTracker::baseline() {
   1.540 +  MutexLockerEx lock(&_query_lock, true);
   1.541 +  MemSnapshot* snapshot = get_snapshot();
   1.542 +  if (snapshot != NULL) {
   1.543 +    return _baseline.baseline(*snapshot, false);
   1.544 +  }
   1.545 +  return false;
   1.546 +}
   1.547 +
   1.548 +// print memory usage from current snapshot
   1.549 +bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
   1.550 +  MemBaseline  baseline;
   1.551 +  MutexLockerEx lock(&_query_lock, true);
   1.552 +  MemSnapshot* snapshot = get_snapshot();
   1.553 +  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
   1.554 +    BaselineReporter reporter(out, unit);
   1.555 +    reporter.report_baseline(baseline, summary_only);
   1.556 +    return true;
   1.557 +  }
   1.558 +  return false;
   1.559 +}
   1.560 +
   1.561 +// compare memory usage between current snapshot and baseline
   1.562 +bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
   1.563 +  MutexLockerEx lock(&_query_lock, true);
   1.564 +  if (_baseline.baselined()) {
   1.565 +    MemBaseline baseline;
   1.566 +    MemSnapshot* snapshot = get_snapshot();
   1.567 +    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
   1.568 +      BaselineReporter reporter(out, unit);
   1.569 +      reporter.diff_baselines(baseline, _baseline, summary_only);
   1.570 +      return true;
   1.571 +    }
   1.572 +  }
   1.573 +  return false;
   1.574 +}
   1.575 +
#ifndef PRODUCT
// Walk the caller's stack, writing one resolved function name per line
// into 'buf' (at most 'len' bytes, always NUL-terminated on the stop
// path). 'toSkip' is the number of innermost frames to skip.
void MemTracker::walk_stack(int toSkip, char* buf, int len) {
  int cur_len = 0;
  char tmp[1024];
  address pc;

  while (cur_len < len) {
    pc = os::get_caller_pc(toSkip + 1);
    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
      // append the name; jio_snprintf bounds the write to the remaining space
      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
      cur_len = (int)strlen(buf);
    } else {
      // unresolvable frame (or end of stack): terminate the buffer and stop
      buf[cur_len] = '\0';
      break;
    }
    toSkip ++;
  }
}
   1.594 +
   1.595 +void MemTracker::print_tracker_stats(outputStream* st) {
   1.596 +  st->print_cr("\nMemory Tracker Stats:");
   1.597 +  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
   1.598 +  st->print_cr("\tthead count = %d", _thread_count);
   1.599 +  st->print_cr("\tArena instance = %d", Arena::_instance_count);
   1.600 +  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
   1.601 +  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
   1.602 +  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
   1.603 +  if (_worker_thread != NULL) {
   1.604 +    st->print_cr("\tWorker thread:");
   1.605 +    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
   1.606 +    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
   1.607 +    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
   1.608 +  } else {
   1.609 +    st->print_cr("\tWorker thread is not started");
   1.610 +  }
   1.611 +  st->print_cr(" ");
   1.612 +
   1.613 +  if (_snapshot != NULL) {
   1.614 +    _snapshot->print_snapshot_stats(st);
   1.615 +  } else {
   1.616 +    st->print_cr("No snapshot");
   1.617 +  }
   1.618 +}
   1.619 +#endif
   1.620 +

mercurial