--- a/src/share/vm/services/memTracker.cpp	Fri Feb 01 13:30:12 2013 -0500
+++ b/src/share/vm/services/memTracker.cpp	Fri Feb 01 23:48:08 2013 +0100
@@ -29,6 +29,7 @@
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/threadCritical.hpp"
+#include "runtime/vm_operations.hpp"
 #include "services/memPtr.hpp"
 #include "services/memReporter.hpp"
 #include "services/memTracker.hpp"
@@ -65,6 +66,8 @@
 MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
 int MemTracker::_thread_count = 255;
 volatile jint MemTracker::_pooled_recorder_count = 0;
+volatile unsigned long MemTracker::_processing_generation = 0;
+volatile bool MemTracker::_worker_thread_idle = false;
 debug_only(intx MemTracker::_main_thread_tid = 0;)
 NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)
 
@@ -279,7 +282,7 @@
     }
     cur_head->set_next(NULL);
     Atomic::dec(&_pooled_recorder_count);
-    debug_only(cur_head->set_generation();)
+    cur_head->set_generation();
     return cur_head;
   }
 }
@@ -570,6 +573,51 @@
   return false;
 }
 
+// Whitebox API for blocking until the current generation of NMT data has been merged
+bool MemTracker::wbtest_wait_for_data_merge() {
+  // NMT can't be shutdown while we're holding _query_lock
+  MutexLockerEx lock(_query_lock, true);
+  assert(_worker_thread != NULL, "Invalid query");
+  // the generation at query time, so NMT will spin till this generation is processed
+  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
+  unsigned long current_processing_generation = _processing_generation;
+  // if generation counter overflown
+  bool generation_overflown = (generation_at_query_time < current_processing_generation);
+  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
+  // spin
+  while (!shutdown_in_progress()) {
+    if (!generation_overflown) {
+      if (current_processing_generation > generation_at_query_time) {
+        return true;
+      }
+    } else {
+      assert(generations_to_wrap >= 0, "Sanity check");
+      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
+      assert(current_generations_to_wrap >= 0, "Sanity check");
+      // to overflow an unsigned long should take long time, so to_wrap check should be sufficient
+      if (current_generations_to_wrap > generations_to_wrap &&
+          current_processing_generation > generation_at_query_time) {
+        return true;
+      }
+    }
+
+    // if worker thread is idle, but generation is not advancing, that means
+    // there is no safepoint to let NMT advance generation, force one.
+    if (_worker_thread_idle) {
+      VM_ForceSafepoint vfs;
+      VMThread::execute(&vfs);
+    }
+    MemSnapshot* snapshot = get_snapshot();
+    if (snapshot == NULL) {
+      return false;
+    }
+    snapshot->wait(1000);
+    current_processing_generation = _processing_generation;
+  }
+  // We end up here if NMT is shutting down before our data has been merged
+  return false;
+}
+
 // compare memory usage between current snapshot and baseline
 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
   MutexLockerEx lock(_query_lock, true);