src/share/vm/services/memTracker.cpp

changeset 7074:833b0f92429a
parent    6911:ce8f6bb717c9
child     7077:36c9011aaead
     1.1 --- a/src/share/vm/services/memTracker.cpp	Wed Aug 27 09:36:55 2014 +0200
     1.2 +++ b/src/share/vm/services/memTracker.cpp	Wed Aug 27 08:19:12 2014 -0400
     1.3 @@ -1,5 +1,5 @@
     1.4  /*
     1.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
     1.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8   *
     1.9   * This code is free software; you can redistribute it and/or modify it
    1.10 @@ -23,862 +23,308 @@
    1.11   */
    1.12  #include "precompiled.hpp"
    1.13  
    1.14 -#include "oops/instanceKlass.hpp"
    1.15 -#include "runtime/atomic.hpp"
    1.16 -#include "runtime/interfaceSupport.hpp"
    1.17 -#include "runtime/mutexLocker.hpp"
    1.18 -#include "runtime/safepoint.hpp"
    1.19 -#include "runtime/threadCritical.hpp"
    1.20 -#include "runtime/thread.inline.hpp"
    1.21 -#include "runtime/vm_operations.hpp"
    1.22 -#include "services/memPtr.hpp"
    1.23 +#include "runtime/mutex.hpp"
    1.24 +#include "services/memBaseline.hpp"
    1.25  #include "services/memReporter.hpp"
    1.26 +#include "services/mallocTracker.inline.hpp"
    1.27  #include "services/memTracker.hpp"
    1.28 -#include "utilities/decoder.hpp"
    1.29  #include "utilities/defaultStream.hpp"
    1.30 -#include "utilities/globalDefinitions.hpp"
    1.31  
    1.32 -bool NMT_track_callsite = false;
    1.33 +#ifdef SOLARIS
    1.34 +  volatile bool NMT_stack_walkable = false;
    1.35 +#else
    1.36 +  volatile bool NMT_stack_walkable = true;
    1.37 +#endif
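         +// (Assumption) Stack walking is presumably not yet safe this early in
         +// startup on Solaris, so NMT_stack_walkable starts out false there and
         +// is expected to be flipped on once walking becomes possible.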
    1.38  
    1.39 -// walk all 'known' threads at NMT sync point, and collect their recorders
    1.40 -void SyncThreadRecorderClosure::do_thread(Thread* thread) {
    1.41 -  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
    1.42 -  if (thread->is_Java_thread()) {
    1.43 -    JavaThread* javaThread = (JavaThread*)thread;
    1.44 -    MemRecorder* recorder = javaThread->get_recorder();
    1.45 -    if (recorder != NULL) {
    1.46 -      MemTracker::enqueue_pending_recorder(recorder);
    1.47 -      javaThread->set_recorder(NULL);
    1.48 +volatile NMT_TrackingLevel MemTracker::_tracking_level = NMT_unknown;
    1.49 +NMT_TrackingLevel MemTracker::_cmdline_tracking_level = NMT_unknown;
    1.50 +
    1.51 +NativeCallStack emptyStack(0, false);
    1.52 +
    1.53 +MemBaseline MemTracker::_baseline;
    1.54 +Mutex*      MemTracker::_query_lock = NULL;
    1.55 +bool MemTracker::_is_nmt_env_valid = true;
    1.56 +
    1.57 +
    1.58 +NMT_TrackingLevel MemTracker::init_tracking_level() {
    1.59 +  NMT_TrackingLevel level = NMT_off;
    1.60 +  char buf[64];
    1.61 +  char nmt_option[64];
    1.62 +  jio_snprintf(buf, sizeof(buf), "NMT_LEVEL_%d", os::current_process_id());
    1.63 +  if (os::getenv(buf, nmt_option, sizeof(nmt_option))) {
    1.64 +    if (strcmp(nmt_option, "summary") == 0) {
    1.65 +      level = NMT_summary;
    1.66 +    } else if (strcmp(nmt_option, "detail") == 0) {
    1.67 +#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
    1.68 +      level = NMT_detail;
    1.69 +#else
    1.70 +      level = NMT_summary;
    1.71 +#endif // PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
    1.72 +    } else if (strcmp(nmt_option, "off") != 0) {
    1.73 +      // The option value is invalid
    1.74 +      _is_nmt_env_valid = false;
    1.75      }
    1.76 +
    1.77 +    // Remove the environment variable to avoid leaking to child processes
    1.78 +    os::unsetenv(buf);
    1.79    }
    1.80 -  _thread_count ++;
    1.81 +
    1.82 +  if (!MallocTracker::initialize(level) ||
    1.83 +      !VirtualMemoryTracker::initialize(level)) {
    1.84 +    level = NMT_off;
    1.85 +  }
    1.86 +  return level;
    1.87  }
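         +
         +// A sketch of where NMT_LEVEL_<pid> comes from: the launcher is assumed
         +// to export it before the VM is created, along these lines (hypothetical
         +// launcher-side code, not part of this file):
         +//
         +//   char name[64];
         +//   snprintf(name, sizeof(name), "NMT_LEVEL_%d", (int)getpid());
         +//   setenv(name, "summary", 1);  // or "detail" / "off"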
    1.88  
    1.89 -
    1.90 -MemRecorder* volatile           MemTracker::_global_recorder = NULL;
    1.91 -MemSnapshot*                    MemTracker::_snapshot = NULL;
    1.92 -MemBaseline                     MemTracker::_baseline;
    1.93 -Mutex*                          MemTracker::_query_lock = NULL;
    1.94 -MemRecorder* volatile           MemTracker::_merge_pending_queue = NULL;
    1.95 -MemRecorder* volatile           MemTracker::_pooled_recorders = NULL;
    1.96 -MemTrackWorker*                 MemTracker::_worker_thread = NULL;
    1.97 -int                             MemTracker::_sync_point_skip_count = 0;
    1.98 -MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
    1.99 -volatile MemTracker::NMTStates  MemTracker::_state = NMT_uninited;
   1.100 -MemTracker::ShutdownReason      MemTracker::_reason = NMT_shutdown_none;
   1.101 -int                             MemTracker::_thread_count = 255;
   1.102 -volatile jint                   MemTracker::_pooled_recorder_count = 0;
   1.103 -volatile unsigned long          MemTracker::_processing_generation = 0;
   1.104 -volatile bool                   MemTracker::_worker_thread_idle = false;
   1.105 -volatile jint                   MemTracker::_pending_op_count = 0;
   1.106 -volatile bool                   MemTracker::_slowdown_calling_thread = false;
   1.107 -debug_only(intx                 MemTracker::_main_thread_tid = 0;)
   1.108 -NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)
   1.109 -
   1.110 -void MemTracker::init_tracking_options(const char* option_line) {
   1.111 -  _tracking_level = NMT_off;
   1.112 -  if (strcmp(option_line, "=summary") == 0) {
   1.113 -    _tracking_level = NMT_summary;
   1.114 -  } else if (strcmp(option_line, "=detail") == 0) {
   1.115 -    // detail relies on a stack-walking ability that may not
   1.116 -    // be available depending on platform and/or compiler flags
   1.117 -#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
   1.118 -      _tracking_level = NMT_detail;
   1.119 -#else
   1.120 -      jio_fprintf(defaultStream::error_stream(),
   1.121 -        "NMT detail is not supported on this platform.  Using NMT summary instead.\n");
   1.122 -      _tracking_level = NMT_summary;
   1.123 -#endif
   1.124 -  } else if (strcmp(option_line, "=off") != 0) {
   1.125 -    vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
   1.126 -  }
   1.127 -}
   1.128 -
   1.129 -// first phase of bootstrapping, when VM is still in single-threaded mode.
   1.130 -void MemTracker::bootstrap_single_thread() {
   1.131 -  if (_tracking_level > NMT_off) {
   1.132 -    assert(_state == NMT_uninited, "wrong state");
   1.133 -
   1.134 -    // NMT is not supported when UseMallocOnly is on. NMT can NOT
   1.135 -    // handle the amount of malloc data without significantly impacting
   1.136 -    // runtime performance when this flag is on.
   1.137 -    if (UseMallocOnly) {
   1.138 -      shutdown(NMT_use_malloc_only);
   1.139 -      return;
   1.140 -    }
   1.141 -
   1.142 +void MemTracker::init() {
   1.143 +  if (tracking_level() >= NMT_summary) {
   1.144      _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
   1.145 +    // Already out of memory? It is unlikely, but it still has to be handled.
   1.146      if (_query_lock == NULL) {
   1.147 -      shutdown(NMT_out_of_memory);
   1.148 -      return;
   1.149 -    }
   1.150 -
   1.151 -    debug_only(_main_thread_tid = os::current_thread_id();)
   1.152 -    _state = NMT_bootstrapping_single_thread;
   1.153 -    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
   1.154 -  }
   1.155 -}
   1.156 -
   1.157 -// second phase of bootstrapping, when the VM is about to enter, or has already entered, multi-threaded mode.
   1.158 -void MemTracker::bootstrap_multi_thread() {
   1.159 -  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
   1.160 -  // create nmt lock for multi-thread execution
   1.161 -    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
   1.162 -    _state = NMT_bootstrapping_multi_thread;
   1.163 -    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
   1.164 -  }
   1.165 -}
   1.166 -
   1.167 -// fully start nmt
   1.168 -void MemTracker::start() {
   1.169 -  // Native memory tracking is off from command line option
   1.170 -  if (_tracking_level == NMT_off || shutdown_in_progress()) return;
   1.171 -
   1.172 -  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
   1.173 -  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");
   1.174 -
   1.175 -  _snapshot = new (std::nothrow)MemSnapshot();
   1.176 -  if (_snapshot != NULL) {
   1.177 -    if (!_snapshot->out_of_memory() && start_worker(_snapshot)) {
   1.178 -      _state = NMT_started;
   1.179 -      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
   1.180 -      return;
   1.181 -    }
   1.182 -
   1.183 -    delete _snapshot;
   1.184 -    _snapshot = NULL;
   1.185 -  }
   1.186 -
   1.187 -  // fail to start native memory tracking, shut it down
   1.188 -  shutdown(NMT_initialization);
   1.189 -}
   1.190 -
   1.191 -/**
   1.192 - * Shutting down native memory tracking.
   1.193 - * We can not shut down native memory tracking immediately, so we just
   1.194 - * set the shutdown-pending flag, and every native memory tracking component
   1.195 - * shuts itself down in an orderly fashion.
   1.196 - *
   1.197 - * The shutdown sequence:
   1.198 - *  1. MemTracker::shutdown() sets MemTracker to the shutdown-pending state
   1.199 - *  2. The worker thread calls MemTracker::final_shutdown(), which transitions
   1.200 - *     MemTracker to the final shutdown state.
   1.201 - *  3. At a sync point, MemTracker does the final cleanup before setting the
   1.202 - *     memory tracking level to off, completing the shutdown.
   1.203 - */
   1.204 -void MemTracker::shutdown(ShutdownReason reason) {
   1.205 -  if (_tracking_level == NMT_off) return;
   1.206 -
   1.207 -  if (_state <= NMT_bootstrapping_single_thread) {
   1.208 -    // we are still in single-threaded mode, so there is no contention
   1.209 -    _state = NMT_shutdown_pending;
   1.210 -    _reason = reason;
   1.211 -  } else {
   1.212 -    // we want to know who initiated the shutdown
   1.213 -    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
   1.214 -                                       (jint*)&_state, (jint)NMT_started)) {
   1.215 -        _reason = reason;
   1.216 +      shutdown();
   1.217      }
   1.218    }
   1.219  }
   1.220  
   1.221 -// final phase of shutdown
   1.222 -void MemTracker::final_shutdown() {
   1.223 -  // delete all pending recorders and pooled recorders
   1.224 -  delete_all_pending_recorders();
   1.225 -  delete_all_pooled_recorders();
   1.226 -
   1.227 -  {
   1.228 -    // shared baseline and snapshot are the only objects needed to
   1.229 -    // create query results
   1.230 -    MutexLockerEx locker(_query_lock, true);
   1.231 -    // cleanup baseline data and snapshot
   1.232 -    _baseline.clear();
   1.233 -    delete _snapshot;
   1.234 -    _snapshot = NULL;
   1.235 +bool MemTracker::check_launcher_nmt_support(const char* value) {
   1.236 +  if (strcmp(value, "=detail") == 0) {
   1.237 +#if !PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
   1.238 +    jio_fprintf(defaultStream::error_stream(),
   1.239 +      "NMT detail is not supported on this platform.  Using NMT summary instead.\n");
   1.240 +    if (MemTracker::tracking_level() != NMT_summary) {
   1.241 +      return false;
   1.242 +    }
   1.243 +#else
   1.244 +    if (MemTracker::tracking_level() != NMT_detail) {
   1.245 +      return false;
   1.246 +    }
   1.247 +#endif
   1.248 +  } else if (strcmp(value, "=summary") == 0) {
   1.249 +    if (MemTracker::tracking_level() != NMT_summary) {
   1.250 +      return false;
   1.251 +    }
   1.252 +  } else if (strcmp(value, "=off") == 0) {
   1.253 +    if (MemTracker::tracking_level() != NMT_off) {
   1.254 +      return false;
   1.255 +    }
   1.256 +  } else {
   1.257 +    _is_nmt_env_valid = false;
   1.258    }
   1.259  
   1.260 -  // shutdown shared decoder instance, since it is only
   1.261 -  // used by native memory tracking so far.
   1.262 -  Decoder::shutdown();
   1.263 -
   1.264 -  MemTrackWorker* worker = NULL;
   1.265 -  {
   1.266 -    ThreadCritical tc;
   1.267 -    // can not delete worker inside the thread critical
   1.268 -    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
   1.269 -      worker = _worker_thread;
   1.270 -      _worker_thread = NULL;
   1.271 -    }
   1.272 -  }
   1.273 -  if (worker != NULL) {
   1.274 -    delete worker;
   1.275 -  }
   1.276 -  _state = NMT_final_shutdown;
   1.277 +  return true;
   1.278  }
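         +
         +// Taken together with init_tracking_level(): the launcher requests a level
         +// via the environment, the VM initializes from it, and this check verifies
         +// that the level actually in effect matches what the launcher asked for.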
   1.279  
   1.280 -// delete all pooled recorders
   1.281 -void MemTracker::delete_all_pooled_recorders() {
   1.282 -  // free all pooled recorders
   1.283 -  MemRecorder* volatile cur_head = _pooled_recorders;
   1.284 -  if (cur_head != NULL) {
   1.285 -    MemRecorder* null_ptr = NULL;
   1.286 -    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
   1.287 -      (void*)&_pooled_recorders, (void*)cur_head)) {
   1.288 -      cur_head = _pooled_recorders;
   1.289 -    }
   1.290 -    if (cur_head != NULL) {
   1.291 -      delete cur_head;
   1.292 -      _pooled_recorder_count = 0;
   1.293 -    }
   1.294 +bool MemTracker::verify_nmt_option() {
   1.295 +  return _is_nmt_env_valid;
   1.296 +}
   1.297 +
   1.298 +void* MemTracker::malloc_base(void* memblock) {
   1.299 +  return MallocTracker::get_base(memblock);
   1.300 +}
   1.301 +
   1.302 +void Tracker::record(address addr, size_t size) {
   1.303 +  if (MemTracker::tracking_level() < NMT_summary) return;
   1.304 +  switch(_type) {
   1.305 +    case uncommit:
   1.306 +      VirtualMemoryTracker::remove_uncommitted_region(addr, size);
   1.307 +      break;
   1.308 +    case release:
   1.309 +      VirtualMemoryTracker::remove_released_region(addr, size);
   1.310 +      break;
   1.311 +    default:
   1.312 +      ShouldNotReachHere();
   1.313    }
   1.314  }
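         +
         +// A typical call site (sketch, assuming the tracker factory helpers
         +// declared in memTracker.hpp, e.g. get_virtual_memory_uncommit_tracker()):
         +//
         +//   Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
         +//   if (os::uncommit_memory(addr, size)) {
         +//     tkr.record((address)addr, size);
         +//   }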
   1.315  
   1.316 -// delete all recorders in pending queue
   1.317 -void MemTracker::delete_all_pending_recorders() {
   1.318 -  // free all pending recorders
   1.319 -  MemRecorder* pending_head = get_pending_recorders();
   1.320 -  if (pending_head != NULL) {
   1.321 -    delete pending_head;
   1.322 +
   1.323 +// Shutdown can only be issued via jcmd, and NMT jcmd requests are
   1.324 +// serialized by a lock.
   1.325 +void MemTracker::shutdown() {
   1.326 +  // We can only shut down NMT to the minimal tracking level if it
   1.327 +  // was ever on.
   1.328 +  if (tracking_level() > NMT_minimal) {
   1.329 +    transition_to(NMT_minimal);
   1.330    }
   1.331  }
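         +
         +// Note: shutdown appears to be one-way; transition_to() only ever lowers
         +// the level here, and no caller is expected to raise it again after startup.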
   1.332  
   1.333 -/*
   1.334 - * retrieve the per-thread recorder of the specified thread.
   1.335 - * if thread == NULL, it means the global recorder
   1.336 - */
   1.337 -MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
   1.338 -  if (shutdown_in_progress()) return NULL;
   1.339 +bool MemTracker::transition_to(NMT_TrackingLevel level) {
   1.340 +  NMT_TrackingLevel current_level = tracking_level();
   1.341  
   1.342 -  MemRecorder* rc;
   1.343 -  if (thread == NULL) {
   1.344 -    rc = _global_recorder;
   1.345 +  if (current_level == level) {
   1.346 +    return true;
   1.347 +  } else if (current_level > level) {
   1.348 +    // Downgrading: lower the tracking level first, so no new tracking
   1.349 +    // records are taken while the trackers wind down.
   1.350 +    _tracking_level = level;
   1.351 +    // Make _tracking_level visible immediately.
   1.352 +    OrderAccess::fence();
   1.353 +    VirtualMemoryTracker::transition(current_level, level);
   1.354 +    MallocTracker::transition(current_level, level);
   1.355 +
   1.356 +    if (level == NMT_minimal) _baseline.reset();
   1.357    } else {
   1.358 -    rc = thread->get_recorder();
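         +    // Upgrading: prepare the trackers first, then publish the new
         +    // tracking level, so no record is taken before the trackers are ready.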
   1.359 +    VirtualMemoryTracker::transition(current_level, level);
   1.360 +    MallocTracker::transition(current_level, level);
   1.361 +
   1.362 +    _tracking_level = level;
   1.363 +    // Make _tracking_level visible immediately.
   1.364 +    OrderAccess::fence();
   1.365    }
   1.366  
   1.367 -  if (rc != NULL && rc->is_full()) {
   1.368 -    enqueue_pending_recorder(rc);
   1.369 -    rc = NULL;
   1.370 -  }
   1.371 -
   1.372 -  if (rc == NULL) {
   1.373 -    rc = get_new_or_pooled_instance();
   1.374 -    if (thread == NULL) {
   1.375 -      _global_recorder = rc;
   1.376 -    } else {
   1.377 -      thread->set_recorder(rc);
   1.378 -    }
   1.379 -  }
   1.380 -  return rc;
   1.381 +  return true;
   1.382  }
   1.383  
   1.384 -/*
   1.385 - * get a per-thread recorder from the pool, or create a new one
   1.386 - * if none is available.
   1.387 - */
   1.388 -MemRecorder* MemTracker::get_new_or_pooled_instance() {
   1.389 -   MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
   1.390 -   if (cur_head == NULL) {
   1.391 -     MemRecorder* rec = new (std::nothrow)MemRecorder();
   1.392 -     if (rec == NULL || rec->out_of_memory()) {
   1.393 -       shutdown(NMT_out_of_memory);
   1.394 -       if (rec != NULL) {
   1.395 -         delete rec;
   1.396 -         rec = NULL;
   1.397 -       }
   1.398 -     }
   1.399 -     return rec;
   1.400 -   } else {
   1.401 -     MemRecorder* next_head = cur_head->next();
   1.402 -     if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
   1.403 -       (void*)cur_head)) {
   1.404 -       return get_new_or_pooled_instance();
   1.405 -     }
   1.406 -     cur_head->set_next(NULL);
   1.407 -     Atomic::dec(&_pooled_recorder_count);
   1.408 -     cur_head->set_generation();
   1.409 -     return cur_head;
   1.410 +void MemTracker::final_report(outputStream* output) {
   1.411 +  assert(output != NULL, "No output stream");
   1.412 +  if (tracking_level() >= NMT_summary) {
   1.413 +    MallocMemorySnapshot* malloc_memory_snapshot =
   1.414 +      MallocMemorySummary::as_snapshot();
   1.415 +    malloc_memory_snapshot->make_adjustment();
   1.416 +
   1.417 +    VirtualMemorySnapshot* virtual_memory_snapshot =
   1.418 +      VirtualMemorySummary::as_snapshot();
   1.419 +
   1.420 +    MemSummaryReporter rptr(malloc_memory_snapshot,
   1.421 +      virtual_memory_snapshot, output);
   1.422 +    rptr.report();
   1.423 +    // shut down NMT; beyond this point the data is no longer accurate
   1.424 +    shutdown();
   1.425    }
   1.426  }
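         +
         +// final_report() is expected to be called once, at VM exit (for example
         +// when PrintNMTStatistics is set); the caller lives outside this file.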
   1.427  
   1.428 -/*
   1.429 - * retrieve all recorders in pending queue, and empty the queue
   1.430 - */
   1.431 -MemRecorder* MemTracker::get_pending_recorders() {
   1.432 -  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
   1.433 -  MemRecorder* null_ptr = NULL;
   1.434 -  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
   1.435 -    (void*)cur_head)) {
   1.436 -    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
   1.437 -  }
   1.438 -  NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
   1.439 -  return cur_head;
   1.440 -}
   1.441 +// This is a walker that gathers malloc site hashtable statistics;
   1.442 +// the results are used for tuning.
   1.443 +class StatisticsWalker : public MallocSiteWalker {
   1.444 + private:
   1.445 +  enum Threshold {
   1.446 +    // aggregates statistics over this threshold into one
   1.447 +    // line item.
   1.448 +    report_threshold = 20
   1.449 +  };
   1.450  
   1.451 -/*
   1.452 - * release a recorder to the recorder pool.
   1.453 - */
   1.454 -void MemTracker::release_thread_recorder(MemRecorder* rec) {
   1.455 -  assert(rec != NULL, "null recorder");
   1.456 -  // we don't want to pool too many recorders
   1.457 -  rec->set_next(NULL);
   1.458 -  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
   1.459 -    delete rec;
   1.460 -    return;
   1.461 + private:
   1.462 +  // Number of allocation sites that have all memory freed
   1.463 +  int   _empty_entries;
   1.464 +  // Total number of allocation sites, including empty sites
   1.465 +  int   _total_entries;
   1.466 +  // Distribution of captured call stack depths
   1.467 +  int   _stack_depth_distribution[NMT_TrackingStackDepth];
   1.468 +  // Hash distribution
   1.469 +  int   _hash_distribution[report_threshold];
   1.470 +  // Number of hash buckets that have entries over the threshold
   1.471 +  int   _bucket_over_threshold;
   1.472 +
   1.473 +  // The hash bucket the walker is currently walking
   1.474 +  int   _current_hash_bucket;
   1.475 +  // The length of the current hash bucket
   1.476 +  int   _current_bucket_length;
   1.477 +  // Number of hash buckets that are not empty
   1.478 +  int   _used_buckets;
   1.479 +  // Longest hash bucket length
   1.480 +  int   _longest_bucket_length;
   1.481 +
   1.482 + public:
   1.483 +  StatisticsWalker() : _empty_entries(0), _total_entries(0) {
   1.484 +    int index = 0;
   1.485 +    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
   1.486 +      _stack_depth_distribution[index] = 0;
   1.487 +    }
   1.488 +    for (index = 0; index < report_threshold; index ++) {
   1.489 +      _hash_distribution[index] = 0;
   1.490 +    }
   1.491 +    _bucket_over_threshold = 0;
   1.492 +    _longest_bucket_length = 0;
   1.493 +    _current_hash_bucket = -1;
   1.494 +    _current_bucket_length = 0;
   1.495 +    _used_buckets = 0;
   1.496    }
   1.497  
   1.498 -  rec->clear();
   1.499 -  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
   1.500 -  rec->set_next(cur_head);
   1.501 -  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
   1.502 -    (void*)cur_head)) {
   1.503 -    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
   1.504 -    rec->set_next(cur_head);
   1.505 -  }
   1.506 -  Atomic::inc(&_pooled_recorder_count);
   1.507 -}
   1.508 +  virtual bool at(const MallocSite* e) {
   1.509 +    if (e->size() == 0) _empty_entries ++;
   1.510 +    _total_entries ++;
   1.511  
   1.512 -// write a record to proper recorder. No lock can be taken from this method
   1.513 -// down.
   1.514 -void MemTracker::write_tracking_record(address addr, MEMFLAGS flags,
   1.515 -    size_t size, jint seq, address pc, JavaThread* thread) {
   1.516 +    // stack depth distribution
   1.517 +    int frames = e->call_stack()->frames();
   1.518 +    _stack_depth_distribution[frames - 1] ++;
   1.519  
   1.520 -    MemRecorder* rc = get_thread_recorder(thread);
   1.521 -    if (rc != NULL) {
   1.522 -      rc->record(addr, flags, size, seq, pc);
   1.523 +    // hash distribution
   1.524 +    int hash_bucket = e->hash() % MallocSiteTable::hash_buckets();
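         +    // The walker is assumed to visit entries bucket by bucket, so a change
         +    // in the bucket index marks the end of the previous bucket's chain.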
   1.525 +    if (_current_hash_bucket == -1) {
   1.526 +      _current_hash_bucket = hash_bucket;
   1.527 +      _current_bucket_length = 1;
   1.528 +    } else if (_current_hash_bucket == hash_bucket) {
   1.529 +      _current_bucket_length ++;
   1.530 +    } else {
   1.531 +      record_bucket_length(_current_bucket_length);
   1.532 +      _current_hash_bucket = hash_bucket;
   1.533 +      _current_bucket_length = 1;
   1.534      }
   1.535 -}
   1.536 -
   1.537 -/**
   1.538 - * enqueue a recorder to the pending queue
   1.539 - */
   1.540 -void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
   1.541 -  assert(rec != NULL, "null recorder");
   1.542 -
   1.543 -  // we are shutting down, so just delete it
   1.544 -  if (shutdown_in_progress()) {
   1.545 -    rec->set_next(NULL);
   1.546 -    delete rec;
   1.547 -    return;
   1.548 +    return true;
   1.549    }
   1.550  
   1.551 -  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
   1.552 -  rec->set_next(cur_head);
   1.553 -  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
   1.554 -    (void*)cur_head)) {
   1.555 -    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
   1.556 -    rec->set_next(cur_head);
   1.557 +  // walk completed
   1.558 +  void completed() {
   1.559 +    record_bucket_length(_current_bucket_length);
   1.560    }
   1.561 -  NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
   1.562 -}
   1.563  
   1.564 -/*
   1.565 - * This method is called at a global safepoint
   1.566 - * during its synchronization process.
   1.567 - *   1. enqueue all JavaThreads' per-thread recorders
   1.568 - *   2. enqueue global recorder
   1.569 - *   3. retrieve all pending recorders
   1.570 - *   4. reset global sequence number generator
   1.571 - *   5. call worker's sync
   1.572 - */
   1.573 -#define MAX_SAFEPOINTS_TO_SKIP     128
   1.574 -#define SAFE_SEQUENCE_THRESHOLD    30
   1.575 -#define HIGH_GENERATION_THRESHOLD  60
   1.576 -#define MAX_RECORDER_THREAD_RATIO  30
   1.577 -#define MAX_RECORDER_PER_THREAD    100
   1.578 -
   1.579 -void MemTracker::sync() {
   1.580 -  assert(_tracking_level > NMT_off, "NMT is not enabled");
   1.581 -  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
   1.582 -
   1.583 -  // Some GC tests hit a large number of safepoints in a short period of time
   1.584 -  // without meaningful activity. We should avoid going to the sync point
   1.585 -  // in these cases, since it can potentially exhaust the generation buffer.
   1.586 -  // Here are the factors that determine whether we should go into the sync point:
   1.587 -  // 1. whether the sequence number is about to overflow
   1.588 -  // 2. whether we are in danger of overflowing the generation buffer
   1.589 -  // 3. how many safepoints have already skipped the sync point
   1.590 -  if (_state == NMT_started) {
   1.591 -    // the worker thread is not ready; no one can manage the generation
   1.592 -    // buffer, so skip this safepoint
   1.593 -    if (_worker_thread == NULL) return;
   1.594 -
   1.595 -    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
   1.596 -      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
   1.597 -      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
   1.598 -      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
   1.599 -        _sync_point_skip_count ++;
   1.600 -        return;
   1.601 +  void report_statistics(outputStream* out) {
   1.602 +    int index;
   1.603 +    out->print_cr("Malloc allocation site table:");
   1.604 +    out->print_cr("\tTotal entries: %d", _total_entries);
   1.605 +    out->print_cr("\tEmpty entries: %d (%2.2f%%)", _empty_entries, ((float)_empty_entries * 100) / _total_entries);
   1.606 +    out->print_cr(" ");
   1.607 +    out->print_cr("Hash distribution:");
   1.608 +    if (_used_buckets < MallocSiteTable::hash_buckets()) {
   1.609 +      out->print_cr("empty buckets: %d", (MallocSiteTable::hash_buckets() - _used_buckets));
   1.610 +    }
   1.611 +    for (index = 0; index < report_threshold; index ++) {
   1.612 +      if (_hash_distribution[index] != 0) {
   1.613 +        if (index == 0) {
   1.614 +          out->print_cr("  %d    entry: %d", 1, _hash_distribution[0]);
   1.615 +        } else if (index < 9) { // single digit
   1.616 +          out->print_cr("  %d  entries: %d", (index + 1), _hash_distribution[index]);
   1.617 +        } else {
   1.618 +          out->print_cr(" %d entries: %d", (index + 1), _hash_distribution[index]);
   1.619 +        }
   1.620        }
   1.621      }
   1.622 -    {
   1.623 -      // This method runs at a safepoint with the ThreadCritical lock;
   1.624 -      // it should guarantee that NMT is fully sync-ed.
   1.625 -      ThreadCritical tc;
   1.626 -
   1.627 -      // We can NOT execute NMT sync-point if there are pending tracking ops.
   1.628 -      if (_pending_op_count == 0) {
   1.629 -        SequenceGenerator::reset();
   1.630 -        _sync_point_skip_count = 0;
   1.631 -
   1.632 -        // walk all JavaThreads to collect recorders
   1.633 -        SyncThreadRecorderClosure stc;
   1.634 -        Threads::threads_do(&stc);
   1.635 -
   1.636 -        _thread_count = stc.get_thread_count();
   1.637 -        MemRecorder* pending_recorders = get_pending_recorders();
   1.638 -
   1.639 -        if (_global_recorder != NULL) {
   1.640 -          _global_recorder->set_next(pending_recorders);
   1.641 -          pending_recorders = _global_recorder;
   1.642 -          _global_recorder = NULL;
   1.643 -        }
   1.644 -
   1.645 -        // see if NMT has too many outstanding recorder instances; it usually
   1.646 -        // means that the worker thread is lagging behind in processing them.
   1.647 -        if (!AutoShutdownNMT) {
   1.648 -          _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
   1.649 -        } else {
   1.650 -          // If auto shutdown is on, enforce MAX_RECORDER_PER_THREAD threshold to prevent OOM
   1.651 -          if (MemRecorder::_instance_count >= _thread_count * MAX_RECORDER_PER_THREAD) {
   1.652 -            shutdown(NMT_out_of_memory);
   1.653 -          }
   1.654 -        }
   1.655 -
   1.656 -        // check _worker_thread with the lock to avoid a race condition
   1.657 -        if (_worker_thread != NULL) {
   1.658 -          _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
   1.659 -        }
   1.660 -        assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
   1.661 -      } else {
   1.662 -        _sync_point_skip_count ++;
   1.663 +    if (_bucket_over_threshold > 0) {
   1.664 +      out->print_cr(" >%d entries: %d", report_threshold,  _bucket_over_threshold);
   1.665 +    }
   1.666 +    out->print_cr("most entries: %d", _longest_bucket_length);
   1.667 +    out->print_cr(" ");
   1.668 +    out->print_cr("Call stack depth distribution:");
   1.669 +    for (index = 0; index < NMT_TrackingStackDepth; index ++) {
   1.670 +      if (_stack_depth_distribution[index] > 0) {
   1.671 +        out->print_cr("\t%d: %d", index + 1, _stack_depth_distribution[index]);
   1.672        }
   1.673      }
   1.674    }
   1.675  
   1.676 -  // now it is time to shut the whole thing off
   1.677 -  if (_state == NMT_final_shutdown) {
   1.678 -    // walk all JavaThreads to delete all recorders
   1.679 -    SyncThreadRecorderClosure stc;
   1.680 -    Threads::threads_do(&stc);
   1.681 -    // delete global recorder
   1.682 -    {
   1.683 -      ThreadCritical tc;
   1.684 -      if (_global_recorder != NULL) {
   1.685 -        delete _global_recorder;
   1.686 -        _global_recorder = NULL;
   1.687 -      }
   1.688 + private:
   1.689 +  void record_bucket_length(int length) {
         +    if (length == 0) return;  // completed() can be called before any entry was visited
   1.690 +    _used_buckets ++;
   1.691 +    if (length <= report_threshold) {
   1.692 +      _hash_distribution[length - 1] ++;
   1.693 +    } else {
   1.694 +      _bucket_over_threshold ++;
   1.695      }
   1.696 -    MemRecorder* pending_recorders = get_pending_recorders();
   1.697 -    if (pending_recorders != NULL) {
   1.698 -      delete pending_recorders;
   1.699 -    }
   1.700 -    // try again at a later sync point; the MemRecorder instance count must
   1.701 -    // drop to zero before NMT can shut down completely
   1.702 -    if (MemRecorder::_instance_count == 0) {
   1.703 -      _state = NMT_shutdown;
   1.704 -      _tracking_level = NMT_off;
   1.705 -    }
   1.706 +    _longest_bucket_length = MAX2(_longest_bucket_length, length);
   1.707    }
   1.708 +};
   1.709 +
   1.710 +
   1.711 +void MemTracker::tuning_statistics(outputStream* out) {
   1.712 +  // NMT statistics
   1.713 +  StatisticsWalker walker;
   1.714 +  MallocSiteTable::walk_malloc_site(&walker);
   1.715 +  walker.completed();
   1.716 +
   1.717 +  out->print_cr("Native Memory Tracking Statistics:");
   1.718 +  out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
   1.719 +  out->print_cr("             Tracking stack depth: %d", NMT_TrackingStackDepth);
   1.720 +  NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
   1.721 +  out->print_cr(" ");
   1.722 +  walker.report_statistics(out);
   1.723  }
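         +
         +// This report is reachable via the NMT diagnostic command, e.g.
         +// (assuming the DCmd exposes the "statistics" option):
         +//
         +//   jcmd <pid> VM.native_memory statistics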
   1.724  
   1.725 -/*
   1.726 - * Start worker thread.
   1.727 - */
   1.728 -bool MemTracker::start_worker(MemSnapshot* snapshot) {
   1.729 -  assert(_worker_thread == NULL && _snapshot != NULL, "Just Check");
   1.730 -  _worker_thread = new (std::nothrow) MemTrackWorker(snapshot);
   1.731 -  if (_worker_thread == NULL) {
   1.732 -    return false;
   1.733 -  } else if (_worker_thread->has_error()) {
   1.734 -    delete _worker_thread;
   1.735 -    _worker_thread = NULL;
   1.736 -    return false;
   1.737 -  }
   1.738 -  _worker_thread->start();
   1.739 -  return true;
   1.740 -}
   1.741 -
   1.742 -/*
   1.743 - * We need to collect a JavaThread's per-thread recorder
   1.744 - * before it exits.
   1.745 - */
   1.746 -void MemTracker::thread_exiting(JavaThread* thread) {
   1.747 -  if (is_on()) {
   1.748 -    MemRecorder* rec = thread->get_recorder();
   1.749 -    if (rec != NULL) {
   1.750 -      enqueue_pending_recorder(rec);
   1.751 -      thread->set_recorder(NULL);
   1.752 -    }
   1.753 -  }
   1.754 -}
   1.755 -
   1.756 -// baseline current memory snapshot
   1.757 -bool MemTracker::baseline() {
   1.758 -  MutexLocker lock(_query_lock);
   1.759 -  MemSnapshot* snapshot = get_snapshot();
   1.760 -  if (snapshot != NULL) {
   1.761 -    return _baseline.baseline(*snapshot, false);
   1.762 -  }
   1.763 -  return false;
   1.764 -}
   1.765 -
   1.766 -// print memory usage from current snapshot
   1.767 -bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
   1.768 -  MemBaseline  baseline;
   1.769 -  MutexLocker  lock(_query_lock);
   1.770 -  MemSnapshot* snapshot = get_snapshot();
   1.771 -  if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
   1.772 -    BaselineReporter reporter(out, unit);
   1.773 -    reporter.report_baseline(baseline, summary_only);
   1.774 -    return true;
   1.775 -  }
   1.776 -  return false;
   1.777 -}
   1.778 -
   1.779 -// Whitebox API for blocking until the current generation of NMT data has been merged
   1.780 -bool MemTracker::wbtest_wait_for_data_merge() {
   1.781 -  // NMT can't be shut down while we're holding _query_lock
   1.782 -  MutexLocker lock(_query_lock);
   1.783 -  assert(_worker_thread != NULL, "Invalid query");
   1.784 -  // the generation at query time, so NMT will spin till this generation is processed
   1.785 -  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
   1.786 -  unsigned long current_processing_generation = _processing_generation;
   1.787 -  // whether the generation counter has overflowed
   1.788 -  bool generation_overflown = (generation_at_query_time < current_processing_generation);
   1.789 -  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
   1.790 -  // spin
   1.791 -  while (!shutdown_in_progress()) {
   1.792 -    if (!generation_overflown) {
   1.793 -      if (current_processing_generation > generation_at_query_time) {
   1.794 -        return true;
   1.795 -      }
   1.796 -    } else {
   1.797 -      assert(generations_to_wrap >= 0, "Sanity check");
   1.798 -      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
   1.799 -      assert(current_generations_to_wrap >= 0, "Sanity check");
   1.800 -      // overflowing an unsigned long should take a long time, so the to_wrap check should be sufficient
   1.801 -      if (current_generations_to_wrap > generations_to_wrap &&
   1.802 -          current_processing_generation > generation_at_query_time) {
   1.803 -        return true;
   1.804 -      }
   1.805 -    }
   1.806 -
   1.807 -    // if the worker thread is idle but the generation is not advancing, it means
   1.808 -    // there is no safepoint to let NMT advance the generation, so force one.
   1.809 -    if (_worker_thread_idle) {
   1.810 -      VM_ForceSafepoint vfs;
   1.811 -      VMThread::execute(&vfs);
   1.812 -    }
   1.813 -    MemSnapshot* snapshot = get_snapshot();
   1.814 -    if (snapshot == NULL) {
   1.815 -      return false;
   1.816 -    }
   1.817 -    snapshot->wait(1000);
   1.818 -    current_processing_generation = _processing_generation;
   1.819 -  }
   1.820 -  // We end up here if NMT is shutting down before our data has been merged
   1.821 -  return false;
   1.822 -}
   1.823 -
   1.824 -// compare memory usage between current snapshot and baseline
   1.825 -bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
   1.826 -  MutexLocker lock(_query_lock);
   1.827 -  if (_baseline.baselined()) {
   1.828 -    MemBaseline baseline;
   1.829 -    MemSnapshot* snapshot = get_snapshot();
   1.830 -    if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
   1.831 -      BaselineReporter reporter(out, unit);
   1.832 -      reporter.diff_baselines(baseline, _baseline, summary_only);
   1.833 -      return true;
   1.834 -    }
   1.835 -  }
   1.836 -  return false;
   1.837 -}
   1.838 -
   1.839 -#ifndef PRODUCT
   1.840 -void MemTracker::walk_stack(int toSkip, char* buf, int len) {
   1.841 -  int cur_len = 0;
   1.842 -  char tmp[1024];
   1.843 -  address pc;
   1.844 -
   1.845 -  while (cur_len < len) {
   1.846 -    pc = os::get_caller_pc(toSkip + 1);
   1.847 -    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
   1.848 -      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
   1.849 -      cur_len = (int)strlen(buf);
   1.850 -    } else {
   1.851 -      buf[cur_len] = '\0';
   1.852 -      break;
   1.853 -    }
   1.854 -    toSkip ++;
   1.855 -  }
   1.856 -}
   1.857 -
   1.858 -void MemTracker::print_tracker_stats(outputStream* st) {
   1.859 -  st->print_cr("\nMemory Tracker Stats:");
   1.860 -  st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
   1.861 -  st->print_cr("\tthread count = %d", _thread_count);
   1.862 -  st->print_cr("\tArena instance = %d", Arena::_instance_count);
   1.863 -  st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
   1.864 -  st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
   1.865 -  st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
   1.866 -  if (_worker_thread != NULL) {
   1.867 -    st->print_cr("\tWorker thread:");
   1.868 -    st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
   1.869 -    st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
   1.870 -    st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
   1.871 -  } else {
   1.872 -    st->print_cr("\tWorker thread is not started");
   1.873 -  }
   1.874 -  st->print_cr(" ");
   1.875 -
   1.876 -  if (_snapshot != NULL) {
   1.877 -    _snapshot->print_snapshot_stats(st);
   1.878 -  } else {
   1.879 -    st->print_cr("No snapshot");
   1.880 -  }
   1.881 -}
   1.882 -#endif
   1.883 -
   1.884 -
   1.885 -// Tracker Implementation
   1.886 -
   1.887 -/*
   1.888 - * Create a tracker.
   1.889 - * This is a fairly complicated constructor, as it has to make two important decisions:
   1.890 - *   1) Does it need to take ThreadCritical lock to write tracking record
   1.891 - *   2) Does it need to pre-reserve a sequence number for the tracking record
   1.892 - *
   1.893 - * The rules to determine if ThreadCritical is needed:
   1.894 - *   1. When NMT is in single-threaded bootstrapping mode, no lock is needed,
   1.895 - *      as the VM is still in single-threaded mode.
   1.896 - *   2. For all threads other than JavaThreads, ThreadCritical is needed
   1.897 - *      to write records to the global recorder.
   1.898 - *   3. JavaThreads that are no longer visible to safepoints also need to
   1.899 - *      take ThreadCritical, and their records are written to the global
   1.900 - *      recorder, since these threads are NOT walked by Threads::threads_do().
   1.901 - *   4. JavaThreads that are running in safepoint-safe states do not stop
   1.902 - *      for safepoints, so the ThreadCritical lock should be taken to write
   1.903 - *      memory records.
   1.904 - *   5. JavaThreads that are running in VM state do not need any lock and
   1.905 - *      records are written to per-thread recorders.
   1.906 - *   6. A thread that has yet to attach a VM 'Thread' needs to take
   1.907 - *      ThreadCritical to write to the global recorder.
   1.908 - *
   1.909 - *  The memory operations that need to pre-reserve sequence numbers:
   1.910 - *    Memory operations that "release" memory blocks and that can fail
   1.911 - *    need to pre-reserve a sequence number. They are realloc, uncommit
   1.912 - *    and release.
   1.913 - *
   1.914 - *  The reason for pre-reserving a sequence number is to prevent a race condition:
   1.915 - *    Thread 1                      Thread 2
   1.916 - *    <release>
   1.917 - *                                  <allocate>
   1.918 - *                                  <write allocate record>
   1.919 - *   <write release record>
   1.920 - *   if Thread 2 happens to obtain the memory address Thread 1 just released,
   1.921 - *   then NMT can mistakenly report the memory is free.
   1.922 - *
   1.923 - *  Notably, free() does not need to pre-reserve a sequence number, because the call
   1.924 - *  does not fail, so we can always write the "release" record before the memory is
   1.925 - *  actually freed.
   1.926 - *
   1.927 - *  For realloc, uncommit and release, the following coding pattern should be used:
   1.928 - *
   1.929 - *     MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
   1.930 - *     ptr = ::realloc(...);
   1.931 - *     if (ptr == NULL) {
   1.932 - *       tkr.record(...)
   1.933 - *     } else {
   1.934 - *       tkr.discard();
   1.935 - *     }
   1.936 - *
   1.937 - *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
   1.938 - *     if (uncommit(...)) {
   1.939 - *       tkr.record(...);
   1.940 - *     } else {
   1.941 - *       tkr.discard();
   1.942 - *     }
   1.943 - *
   1.944 - *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   1.945 - *     if (release(...)) {
   1.946 - *       tkr.record(...);
   1.947 - *     } else {
   1.948 - *       tkr.discard();
   1.949 - *     }
   1.950 - *
   1.951 - * Since a pre-reserved sequence number is only good for the generation in which it
   1.952 - * was acquired, the NMT sync-point has to be skipped while a pending Tracker holds
   1.953 - * a reserved sequence number, to keep the generation from advancing. This is done by
   1.954 - * incrementing and decrementing MemTracker::_pending_op_count; when it is > 0, the
   1.955 - * NMT sync-point is skipped. Not every pre-reservation increments the pending op
   1.956 - * count: for JavaThreads that honor safepoints, no safepoint can occur during the
   1.957 - * memory operation, so the pre-reserved sequence number won't cross the generation boundary.
   1.958 - */
   1.959 -MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) {
   1.960 -  _op = NoOp;
   1.961 -  _seq = 0;
   1.962 -  if (MemTracker::is_on()) {
   1.963 -    _java_thread = NULL;
   1.964 -    _op = op;
   1.965 -
   1.966 -    // figure out if ThreadCritical lock is needed to write this operation
   1.967 -    // to MemTracker
   1.968 -    if (MemTracker::is_single_threaded_bootstrap()) {
   1.969 -      thr = NULL;
   1.970 -    } else if (thr == NULL) {
   1.971 -      // don't use Thread::current(), since it is possible that
   1.972 -      // the calling thread has yet to attach to a VM 'Thread',
   1.973 -      // which would result in an assertion failure
   1.974 -      thr = ThreadLocalStorage::thread();
   1.975 -    }
   1.976 -
   1.977 -    if (thr != NULL) {
   1.978 -      // Check NMT load
   1.979 -      MemTracker::check_NMT_load(thr);
   1.980 -
   1.981 -      if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) {
   1.982 -        _java_thread = (JavaThread*)thr;
   1.983 -        JavaThreadState  state = _java_thread->thread_state();
   1.984 -        // JavaThreads that are safepoint-safe can run through a safepoint,
   1.985 -        // so ThreadCritical is needed to ensure no thread at a safepoint creates
   1.986 -        // new records while the records are being gathered and the sequence number is changing
   1.987 -        _need_thread_critical_lock =
   1.988 -          SafepointSynchronize::safepoint_safe(_java_thread, state);
   1.989 -      } else {
   1.990 -        _need_thread_critical_lock = true;
   1.991 -      }
   1.992 -    } else {
   1.993 -       _need_thread_critical_lock
   1.994 -         = !MemTracker::is_single_threaded_bootstrap();
   1.995 -    }
   1.996 -
   1.997 -    // see if we need to pre-reserve sequence number for this operation
   1.998 -    if (_op == Realloc || _op == Uncommit || _op == Release) {
   1.999 -      if (_need_thread_critical_lock) {
  1.1000 -        ThreadCritical tc;
  1.1001 -        MemTracker::inc_pending_op_count();
  1.1002 -        _seq = SequenceGenerator::next();
  1.1003 -      } else {
  1.1004 -        // for the threads that honor safepoints, no safepoint can occur
  1.1005 -        // during the lifespan of the tracker, so we don't need to increase
  1.1006 -        // the pending op count.
  1.1007 -        _seq = SequenceGenerator::next();
  1.1008 -      }
  1.1009 -    }
  1.1010 -  }
  1.1011 -}
  1.1012 -
  1.1013 -void MemTracker::Tracker::discard() {
  1.1014 -  if (MemTracker::is_on() && _seq != 0) {
  1.1015 -    if (_need_thread_critical_lock) {
  1.1016 -      ThreadCritical tc;
  1.1017 -      MemTracker::dec_pending_op_count();
  1.1018 -    }
  1.1019 -    _seq = 0;
  1.1020 -  }
  1.1021 -}
  1.1022 -
  1.1023 -
  1.1024 -void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size,
  1.1025 -  MEMFLAGS flags, address pc) {
  1.1026 -  assert(old_addr != NULL && new_addr != NULL, "Sanity check");
  1.1027 -  assert(_op == Realloc || _op == NoOp, "Wrong call");
  1.1028 -  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
  1.1029 -    assert(_seq > 0, "Need pre-reserve sequence number");
  1.1030 -    if (_need_thread_critical_lock) {
  1.1031 -      ThreadCritical tc;
  1.1032 -      // free old address, use pre-reserved sequence number
  1.1033 -      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
  1.1034 -        0, _seq, pc, _java_thread);
  1.1035 -      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
  1.1036 -        size, SequenceGenerator::next(), pc, _java_thread);
  1.1037 -      // decrement MemTracker pending_op_count
  1.1038 -      MemTracker::dec_pending_op_count();
  1.1039 -    } else {
  1.1040 -      // free old address, use pre-reserved sequence number
  1.1041 -      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
  1.1042 -        0, _seq, pc, _java_thread);
  1.1043 -      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
  1.1044 -        size, SequenceGenerator::next(), pc, _java_thread);
  1.1045 -    }
  1.1046 -    _seq = 0;
  1.1047 -  }
  1.1048 -}
  1.1049 -
  1.1050 -void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) {
  1.1051 -  // OOM already?
  1.1052 -  if (addr == NULL) return;
  1.1053 -
  1.1054 -  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
  1.1055 -    bool pre_reserved_seq = (_seq != 0);
  1.1056 -    address  pc = CALLER_CALLER_PC;
  1.1057 -    MEMFLAGS orig_flags = flags;
  1.1058 -
  1.1059 -    // OR in the tagging flags
  1.1060 -    switch(_op) {
  1.1061 -      case Malloc:
  1.1062 -        flags |= MemPointerRecord::malloc_tag();
  1.1063 -        break;
  1.1064 -      case Free:
  1.1065 -        flags = MemPointerRecord::free_tag();
  1.1066 -        break;
  1.1067 -      case Realloc:
  1.1068 -        fatal("Use the other Tracker::record()");
  1.1069 -        break;
  1.1070 -      case Reserve:
  1.1071 -      case ReserveAndCommit:
  1.1072 -        flags |= MemPointerRecord::virtual_memory_reserve_tag();
  1.1073 -        break;
  1.1074 -      case Commit:
  1.1075 -        flags = MemPointerRecord::virtual_memory_commit_tag();
  1.1076 -        break;
  1.1077 -      case Type:
  1.1078 -        flags |= MemPointerRecord::virtual_memory_type_tag();
  1.1079 -        break;
  1.1080 -      case Uncommit:
  1.1081 -        assert(pre_reserved_seq, "Need pre-reserve sequence number");
  1.1082 -        flags = MemPointerRecord::virtual_memory_uncommit_tag();
  1.1083 -        break;
  1.1084 -      case Release:
  1.1085 -        assert(pre_reserved_seq, "Need pre-reserve sequence number");
  1.1086 -        flags = MemPointerRecord::virtual_memory_release_tag();
  1.1087 -        break;
  1.1088 -      case ArenaSize:
  1.1089 -        // a bit of a hack here: add a small positive offset to the arena
  1.1090 -        // address for its size record, so the size record is sorted
  1.1091 -        // right after the arena record.
  1.1092 -        flags = MemPointerRecord::arena_size_tag();
  1.1093 -        addr += sizeof(void*);
  1.1094 -        break;
  1.1095 -      case StackRelease:
  1.1096 -        flags = MemPointerRecord::virtual_memory_release_tag();
  1.1097 -        break;
  1.1098 -      default:
  1.1099 -        ShouldNotReachHere();
  1.1100 -    }
  1.1101 -
  1.1102 -    // write memory tracking record
  1.1103 -    if (_need_thread_critical_lock) {
  1.1104 -      ThreadCritical tc;
  1.1105 -      if (_seq == 0) _seq = SequenceGenerator::next();
  1.1106 -      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
  1.1107 -      if (_op == ReserveAndCommit) {
  1.1108 -        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
  1.1109 -          size, SequenceGenerator::next(), pc, _java_thread);
  1.1110 -      }
  1.1111 -      if (pre_reserved_seq) MemTracker::dec_pending_op_count();
  1.1112 -    } else {
  1.1113 -      if (_seq == 0) _seq = SequenceGenerator::next();
  1.1114 -      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
  1.1115 -      if (_op == ReserveAndCommit) {
  1.1116 -        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
  1.1117 -          size, SequenceGenerator::next(), pc, _java_thread);
  1.1118 -      }
  1.1119 -    }
  1.1120 -    _seq = 0;
  1.1121 -  }
  1.1122 -}
  1.1123 -
