src/share/vm/services/memTracker.cpp

Wed, 27 Apr 2016 01:25:04 +0800

author
aoqi
date
Wed, 27 Apr 2016 01:25:04 +0800
changeset 0
f90c822e73f8
child 6876
710a3c8b516e
permissions
-rw-r--r--

Initial load
http://hg.openjdk.java.net/jdk8u/jdk8u/hotspot/
changeset: 6782:28b50d07f6f8
tag: jdk8u25-b17

aoqi@0 1 /*
aoqi@0 2 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
aoqi@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
aoqi@0 4 *
aoqi@0 5 * This code is free software; you can redistribute it and/or modify it
aoqi@0 6 * under the terms of the GNU General Public License version 2 only, as
aoqi@0 7 * published by the Free Software Foundation.
aoqi@0 8 *
aoqi@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
aoqi@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
aoqi@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
aoqi@0 12 * version 2 for more details (a copy is included in the LICENSE file that
aoqi@0 13 * accompanied this code).
aoqi@0 14 *
aoqi@0 15 * You should have received a copy of the GNU General Public License version
aoqi@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
aoqi@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
aoqi@0 18 *
aoqi@0 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
aoqi@0 20 * or visit www.oracle.com if you need additional information or have any
aoqi@0 21 * questions.
aoqi@0 22 *
aoqi@0 23 */
aoqi@0 24 #include "precompiled.hpp"
aoqi@0 25
aoqi@0 26 #include "oops/instanceKlass.hpp"
aoqi@0 27 #include "runtime/atomic.hpp"
aoqi@0 28 #include "runtime/interfaceSupport.hpp"
aoqi@0 29 #include "runtime/mutexLocker.hpp"
aoqi@0 30 #include "runtime/safepoint.hpp"
aoqi@0 31 #include "runtime/threadCritical.hpp"
aoqi@0 32 #include "runtime/vm_operations.hpp"
aoqi@0 33 #include "services/memPtr.hpp"
aoqi@0 34 #include "services/memReporter.hpp"
aoqi@0 35 #include "services/memTracker.hpp"
aoqi@0 36 #include "utilities/decoder.hpp"
aoqi@0 37 #include "utilities/defaultStream.hpp"
aoqi@0 38 #include "utilities/globalDefinitions.hpp"
aoqi@0 39
// Global flag: set to true when NMT runs at detail level on a platform that
// can walk native stacks (see the NMT_track_callsite assignments below), so
// that call sites are captured with tracking records.
bool NMT_track_callsite = false;
aoqi@0 41
aoqi@0 42 // walk all 'known' threads at NMT sync point, and collect their recorders
aoqi@0 43 void SyncThreadRecorderClosure::do_thread(Thread* thread) {
aoqi@0 44 assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");
aoqi@0 45 if (thread->is_Java_thread()) {
aoqi@0 46 JavaThread* javaThread = (JavaThread*)thread;
aoqi@0 47 MemRecorder* recorder = javaThread->get_recorder();
aoqi@0 48 if (recorder != NULL) {
aoqi@0 49 MemTracker::enqueue_pending_recorder(recorder);
aoqi@0 50 javaThread->set_recorder(NULL);
aoqi@0 51 }
aoqi@0 52 }
aoqi@0 53 _thread_count ++;
aoqi@0 54 }
aoqi@0 55
aoqi@0 56
// recorder used when no JavaThread is available (thread == NULL in
// get_thread_recorder); writers take ThreadCritical before using it
MemRecorder* volatile MemTracker::_global_recorder = NULL;
// merged tracking data that query results (baseline/print/compare) read from
MemSnapshot* MemTracker::_snapshot = NULL;
// baseline captured by MemTracker::baseline() for later diffing
MemBaseline MemTracker::_baseline;
// serializes all snapshot/baseline queries
Mutex* MemTracker::_query_lock = NULL;
// lock-free stack of recorders waiting to be merged by the worker thread
MemRecorder* volatile MemTracker::_merge_pending_queue = NULL;
// lock-free free-list of recycled recorders
MemRecorder* volatile MemTracker::_pooled_recorders = NULL;
// background thread that merges pending recorders into the snapshot
MemTrackWorker* MemTracker::_worker_thread = NULL;
// consecutive safepoints at which the sync point was skipped
int MemTracker::_sync_point_skip_count = 0;
// tracking level selected on the command line (off/summary/detail)
MemTracker::NMTLevel MemTracker::_tracking_level = MemTracker::NMT_off;
// lifecycle state of the tracker itself
volatile MemTracker::NMTStates MemTracker::_state = NMT_uninited;
// why shutdown was initiated, if it was
MemTracker::ShutdownReason MemTracker::_reason = NMT_shutdown_none;
// thread count observed at the last sync point (initial estimate)
int MemTracker::_thread_count = 255;
// number of recorders currently in the free pool
volatile jint MemTracker::_pooled_recorder_count = 0;
// generation currently being processed by the worker thread
volatile unsigned long MemTracker::_processing_generation = 0;
// true while the worker thread has nothing to process
volatile bool MemTracker::_worker_thread_idle = false;
// in-flight tracking ops holding pre-reserved sequence numbers (blocks sync)
volatile jint MemTracker::_pending_op_count = 0;
// set when callers should throttle because the worker is lagging behind
volatile bool MemTracker::_slowdown_calling_thread = false;
debug_only(intx MemTracker::_main_thread_tid = 0;)
NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;)
aoqi@0 76
aoqi@0 77 void MemTracker::init_tracking_options(const char* option_line) {
aoqi@0 78 _tracking_level = NMT_off;
aoqi@0 79 if (strcmp(option_line, "=summary") == 0) {
aoqi@0 80 _tracking_level = NMT_summary;
aoqi@0 81 } else if (strcmp(option_line, "=detail") == 0) {
aoqi@0 82 // detail relies on a stack-walking ability that may not
aoqi@0 83 // be available depending on platform and/or compiler flags
aoqi@0 84 #if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
aoqi@0 85 _tracking_level = NMT_detail;
aoqi@0 86 #else
aoqi@0 87 jio_fprintf(defaultStream::error_stream(),
aoqi@0 88 "NMT detail is not supported on this platform. Using NMT summary instead.\n");
aoqi@0 89 _tracking_level = NMT_summary;
aoqi@0 90 #endif
aoqi@0 91 } else if (strcmp(option_line, "=off") != 0) {
aoqi@0 92 vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
aoqi@0 93 }
aoqi@0 94 }
aoqi@0 95
aoqi@0 96 // first phase of bootstrapping, when VM is still in single-threaded mode.
// first phase of bootstrapping, when VM is still in single-threaded mode.
// Allocates the query lock, records the bootstrap thread id (debug builds)
// and moves the tracker into NMT_bootstrapping_single_thread state.
void MemTracker::bootstrap_single_thread() {
  if (_tracking_level > NMT_off) {
    assert(_state == NMT_uninited, "wrong state");

    // NMT is not supported when UseMallocOnly is on. NMT can NOT
    // handle the amount of malloc data without significantly impacting
    // runtime performance when this flag is on.
    if (UseMallocOnly) {
      shutdown(NMT_use_malloc_only);
      return;
    }

    _query_lock = new (std::nothrow) Mutex(Monitor::max_nonleaf, "NMT_queryLock");
    if (_query_lock == NULL) {
      // can not even allocate the query lock: give up on NMT entirely
      shutdown(NMT_out_of_memory);
      return;
    }

    // remember the bootstrap thread so later phases can assert they run on it
    debug_only(_main_thread_tid = os::current_thread_id();)
    _state = NMT_bootstrapping_single_thread;
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}
aoqi@0 120
// second phase of bootstrapping, when VM is about to or has already entered multi-threaded mode.
void MemTracker::bootstrap_multi_thread() {
  if (_tracking_level > NMT_off && _state == NMT_bootstrapping_single_thread) {
    // must still be the same thread that performed the single-threaded phase
    assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
    _state = NMT_bootstrapping_multi_thread;
    // re-evaluate call-site tracking for the multi-threaded phase
    NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  }
}
aoqi@0 130
aoqi@0 131 // fully start nmt
// fully start nmt: allocate the shared snapshot and launch the worker
// thread; any failure shuts tracking down with reason NMT_initialization.
void MemTracker::start() {
  // Native memory tracking is off from command line option
  if (_tracking_level == NMT_off || shutdown_in_progress()) return;

  assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
  assert(_state == NMT_bootstrapping_multi_thread, "wrong state");

  _snapshot = new (std::nothrow)MemSnapshot();
  if (_snapshot != NULL) {
    if (!_snapshot->out_of_memory() && start_worker(_snapshot)) {
      _state = NMT_started;
      NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
      return;
    }

    // worker could not be started (or snapshot ran out of memory):
    // discard the snapshot before shutting down
    delete _snapshot;
    _snapshot = NULL;
  }

  // fail to start native memory tracking, shut it down
  shutdown(NMT_initialization);
}
aoqi@0 154
/**
 * Shutting down native memory tracking.
 * We can not shut down native memory tracking immediately, so we just
 * set a shutdown-pending flag; every native memory tracking component
 * should then shut itself down in an orderly fashion.
 *
 * The shutdown sequence:
 * 1. MemTracker::shutdown() sets MemTracker to shutdown pending state
 * 2. Worker thread calls MemTracker::final_shutdown(), which transitions
 *    MemTracker to final shutdown state.
 * 3. At sync point, MemTracker does final cleanup, before setting memory
 *    tracking level to off to complete shutdown.
 */
void MemTracker::shutdown(ShutdownReason reason) {
  if (_tracking_level == NMT_off) return;

  if (_state <= NMT_bootstrapping_single_thread) {
    // we are still in single thread mode, there is no contention
    _state = NMT_shutdown_pending;
    _reason = reason;
  } else {
    // CAS the state so that only the first caller to initiate shutdown
    // records its reason; later callers lose the race and change nothing
    if ((jint)NMT_started == Atomic::cmpxchg((jint)NMT_shutdown_pending,
      (jint*)&_state, (jint)NMT_started)) {
      _reason = reason;
    }
  }
}
aoqi@0 183
// final phase of shutdown: free all recorders, clear query data, shut the
// shared decoder down and (if running on the worker thread) delete the
// worker, then advance the state to NMT_final_shutdown.
void MemTracker::final_shutdown() {
  // delete all pending recorders and pooled recorders
  delete_all_pending_recorders();
  delete_all_pooled_recorders();

  {
    // shared baseline and snapshot are the only objects needed to
    // create query results
    MutexLockerEx locker(_query_lock, true);
    // cleanup baseline data and snapshot
    _baseline.clear();
    delete _snapshot;
    _snapshot = NULL;
  }

  // shutdown shared decoder instance, since it is only
  // used by native memory tracking so far.
  Decoder::shutdown();

  // detach the worker pointer under ThreadCritical, but perform the
  // (potentially heavy) delete outside the critical section
  MemTrackWorker* worker = NULL;
  {
    ThreadCritical tc;
    // can not delete worker inside the thread critical
    if (_worker_thread != NULL && Thread::current() == _worker_thread) {
      worker = _worker_thread;
      _worker_thread = NULL;
    }
  }
  if (worker != NULL) {
    delete worker;
  }
  _state = NMT_final_shutdown;
}
aoqi@0 218
// delete all pooled recorders
void MemTracker::delete_all_pooled_recorders() {
  // atomically detach the whole free-list head, then free it afterwards
  MemRecorder* volatile cur_head = _pooled_recorders;
  if (cur_head != NULL) {
    MemRecorder* null_ptr = NULL;
    // retry until the observed head is swapped out for NULL
    while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
      (void*)&_pooled_recorders, (void*)cur_head)) {
      cur_head = _pooled_recorders;
    }
    if (cur_head != NULL) {
      // NOTE(review): deleting only the head appears to rely on MemRecorder's
      // destructor releasing the rest of the chain — confirm in memRecorder
      delete cur_head;
      _pooled_recorder_count = 0;
    }
  }
}
aoqi@0 235
aoqi@0 236 // delete all recorders in pending queue
aoqi@0 237 void MemTracker::delete_all_pending_recorders() {
aoqi@0 238 // free all pending recorders
aoqi@0 239 MemRecorder* pending_head = get_pending_recorders();
aoqi@0 240 if (pending_head != NULL) {
aoqi@0 241 delete pending_head;
aoqi@0 242 }
aoqi@0 243 }
aoqi@0 244
aoqi@0 245 /*
aoqi@0 246 * retrieve per-thread recorder of specified thread.
aoqi@0 247 * if thread == NULL, it means global recorder
aoqi@0 248 */
aoqi@0 249 MemRecorder* MemTracker::get_thread_recorder(JavaThread* thread) {
aoqi@0 250 if (shutdown_in_progress()) return NULL;
aoqi@0 251
aoqi@0 252 MemRecorder* rc;
aoqi@0 253 if (thread == NULL) {
aoqi@0 254 rc = _global_recorder;
aoqi@0 255 } else {
aoqi@0 256 rc = thread->get_recorder();
aoqi@0 257 }
aoqi@0 258
aoqi@0 259 if (rc != NULL && rc->is_full()) {
aoqi@0 260 enqueue_pending_recorder(rc);
aoqi@0 261 rc = NULL;
aoqi@0 262 }
aoqi@0 263
aoqi@0 264 if (rc == NULL) {
aoqi@0 265 rc = get_new_or_pooled_instance();
aoqi@0 266 if (thread == NULL) {
aoqi@0 267 _global_recorder = rc;
aoqi@0 268 } else {
aoqi@0 269 thread->set_recorder(rc);
aoqi@0 270 }
aoqi@0 271 }
aoqi@0 272 return rc;
aoqi@0 273 }
aoqi@0 274
/*
 * get a per-thread recorder from pool, or create a new one if
 * there is not one available.
 */
MemRecorder* MemTracker::get_new_or_pooled_instance() {
  MemRecorder* cur_head = const_cast<MemRecorder*> (_pooled_recorders);
  if (cur_head == NULL) {
    // pool is empty: allocate a fresh recorder
    MemRecorder* rec = new (std::nothrow)MemRecorder();
    if (rec == NULL || rec->out_of_memory()) {
      // allocation (or its internal buffers) failed: NMT can not continue
      shutdown(NMT_out_of_memory);
      if (rec != NULL) {
        delete rec;
        rec = NULL;
      }
    }
    return rec;
  } else {
    // lock-free pop of the pool head; on CAS failure another thread raced
    // us, so retry the whole operation via recursion
    MemRecorder* next_head = cur_head->next();
    if ((void*)cur_head != Atomic::cmpxchg_ptr((void*)next_head, (void*)&_pooled_recorders,
      (void*)cur_head)) {
      return get_new_or_pooled_instance();
    }
    cur_head->set_next(NULL);
    Atomic::dec(&_pooled_recorder_count);
    // stamp the recorder with the current generation before handing it out
    cur_head->set_generation();
    return cur_head;
  }
}
aoqi@0 303
/*
 * retrieve all recorders in pending queue, and empty the queue
 */
MemRecorder* MemTracker::get_pending_recorders() {
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  MemRecorder* null_ptr = NULL;
  // atomically detach the entire pending list by CAS-ing the head to NULL;
  // re-read the head and retry whenever another thread changed it
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  }
  NOT_PRODUCT(Atomic::store(0, &_pending_recorder_count));
  return cur_head;
}
aoqi@0 317
/*
 * release a recorder to recorder pool.
 */
void MemTracker::release_thread_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");
  // we don't want to pool too many recorders: during shutdown, or when the
  // pool already holds more than twice the thread count, just free it
  rec->set_next(NULL);
  if (shutdown_in_progress() || _pooled_recorder_count > _thread_count * 2) {
    delete rec;
    return;
  }

  rec->clear();
  // lock-free push onto the pool: link rec in front of the observed head,
  // CAS, and retry with the new head on failure
  MemRecorder* cur_head = const_cast<MemRecorder*>(_pooled_recorders);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_pooled_recorders,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_pooled_recorders);
    rec->set_next(cur_head);
  }
  Atomic::inc(&_pooled_recorder_count);
}
aoqi@0 340
aoqi@0 341 // write a record to proper recorder. No lock can be taken from this method
aoqi@0 342 // down.
aoqi@0 343 void MemTracker::write_tracking_record(address addr, MEMFLAGS flags,
aoqi@0 344 size_t size, jint seq, address pc, JavaThread* thread) {
aoqi@0 345
aoqi@0 346 MemRecorder* rc = get_thread_recorder(thread);
aoqi@0 347 if (rc != NULL) {
aoqi@0 348 rc->record(addr, flags, size, seq, pc);
aoqi@0 349 }
aoqi@0 350 }
aoqi@0 351
/**
 * enqueue a recorder to pending queue
 */
void MemTracker::enqueue_pending_recorder(MemRecorder* rec) {
  assert(rec != NULL, "null recorder");

  // we are shutting down, so just delete it
  if (shutdown_in_progress()) {
    rec->set_next(NULL);
    delete rec;
    return;
  }

  // lock-free push: link rec in front of the observed queue head, CAS,
  // and retry with the freshly read head whenever another thread raced us
  MemRecorder* cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
  rec->set_next(cur_head);
  while ((void*)cur_head != Atomic::cmpxchg_ptr((void*)rec, (void*)&_merge_pending_queue,
    (void*)cur_head)) {
    cur_head = const_cast<MemRecorder*>(_merge_pending_queue);
    rec->set_next(cur_head);
  }
  NOT_PRODUCT(Atomic::inc(&_pending_recorder_count);)
}
aoqi@0 374
/*
 * The method is called at global safepoint
 * during its synchronization process.
 *   1. enqueue all JavaThreads' per-thread recorders
 *   2. enqueue global recorder
 *   3. retrieve all pending recorders
 *   4. reset global sequence number generator
 *   5. call worker's sync
 */
#define MAX_SAFEPOINTS_TO_SKIP     128
#define SAFE_SEQUENCE_THRESHOLD    30
#define HIGH_GENERATION_THRESHOLD  60
#define MAX_RECORDER_THREAD_RATIO  30
#define MAX_RECORDER_PER_THREAD    100

void MemTracker::sync() {
  assert(_tracking_level > NMT_off, "NMT is not enabled");
  assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required");

  // Some GC tests hit large number of safepoints in short period of time
  // without meaningful activities. We should prevent going to
  // sync point in these cases, which can potentially exhaust generation buffer.
  // Here are the factors to determine if we should go into sync point:
  // 1. not to overflow sequence number
  // 2. if we are in danger to overflow generation buffer
  // 3. how many safepoints we already skipped sync point
  if (_state == NMT_started) {
    // worker thread is not ready, no one can manage generation
    // buffer, so skip this safepoint
    if (_worker_thread == NULL) return;

    if (_sync_point_skip_count < MAX_SAFEPOINTS_TO_SKIP) {
      int per_seq_in_use = SequenceGenerator::peek() * 100 / max_jint;
      int per_gen_in_use = _worker_thread->generations_in_use() * 100 / MAX_GENERATIONS;
      // plenty of sequence numbers left but generations are scarce: skip
      // this sync point and let the worker catch up
      if (per_seq_in_use < SAFE_SEQUENCE_THRESHOLD && per_gen_in_use >= HIGH_GENERATION_THRESHOLD) {
        _sync_point_skip_count ++;
        return;
      }
    }
    {
      // This method is running at safepoint, with ThreadCritical lock,
      // it should guarantee that NMT is fully sync-ed.
      ThreadCritical tc;

      // We can NOT execute NMT sync-point if there are pending tracking ops.
      if (_pending_op_count == 0) {
        SequenceGenerator::reset();
        _sync_point_skip_count = 0;

        // walk all JavaThreads to collect recorders
        SyncThreadRecorderClosure stc;
        Threads::threads_do(&stc);

        _thread_count = stc.get_thread_count();
        MemRecorder* pending_recorders = get_pending_recorders();

        // prepend the global recorder to the pending list
        if (_global_recorder != NULL) {
          _global_recorder->set_next(pending_recorders);
          pending_recorders = _global_recorder;
          _global_recorder = NULL;
        }

        // see if NMT has too many outstanding recorder instances, it usually
        // means that worker thread is lagging behind in processing them.
        if (!AutoShutdownNMT) {
          _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
        } else {
          // If auto shutdown is on, enforce MAX_RECORDER_PER_THREAD threshold to prevent OOM
          if (MemRecorder::_instance_count >= _thread_count * MAX_RECORDER_PER_THREAD) {
            shutdown(NMT_out_of_memory);
          }
        }

        // check _worker_thread with lock to avoid racing condition
        if (_worker_thread != NULL) {
          _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
        }
        assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
      } else {
        // pending ops hold pre-reserved sequence numbers that must not cross
        // a generation boundary; try again at a later safepoint
        _sync_point_skip_count ++;
      }
    }
  }

  // now, it is the time to shut whole things off
  if (_state == NMT_final_shutdown) {
    // walk all JavaThreads to collect their recorders (the closure enqueues
    // them onto the pending queue; they are deleted below)
    SyncThreadRecorderClosure stc;
    Threads::threads_do(&stc);
    // delete global recorder
    {
      ThreadCritical tc;
      if (_global_recorder != NULL) {
        delete _global_recorder;
        _global_recorder = NULL;
      }
    }
    MemRecorder* pending_recorders = get_pending_recorders();
    if (pending_recorders != NULL) {
      delete pending_recorders;
    }
    // try at a later sync point to ensure MemRecorder instance drops to zero to
    // completely shutdown NMT
    if (MemRecorder::_instance_count == 0) {
      _state = NMT_shutdown;
      _tracking_level = NMT_off;
    }
  }
}
aoqi@0 484
aoqi@0 485 /*
aoqi@0 486 * Start worker thread.
aoqi@0 487 */
aoqi@0 488 bool MemTracker::start_worker(MemSnapshot* snapshot) {
aoqi@0 489 assert(_worker_thread == NULL && _snapshot != NULL, "Just Check");
aoqi@0 490 _worker_thread = new (std::nothrow) MemTrackWorker(snapshot);
aoqi@0 491 if (_worker_thread == NULL) {
aoqi@0 492 return false;
aoqi@0 493 } else if (_worker_thread->has_error()) {
aoqi@0 494 delete _worker_thread;
aoqi@0 495 _worker_thread = NULL;
aoqi@0 496 return false;
aoqi@0 497 }
aoqi@0 498 _worker_thread->start();
aoqi@0 499 return true;
aoqi@0 500 }
aoqi@0 501
aoqi@0 502 /*
aoqi@0 503 * We need to collect a JavaThread's per-thread recorder
aoqi@0 504 * before it exits.
aoqi@0 505 */
aoqi@0 506 void MemTracker::thread_exiting(JavaThread* thread) {
aoqi@0 507 if (is_on()) {
aoqi@0 508 MemRecorder* rec = thread->get_recorder();
aoqi@0 509 if (rec != NULL) {
aoqi@0 510 enqueue_pending_recorder(rec);
aoqi@0 511 thread->set_recorder(NULL);
aoqi@0 512 }
aoqi@0 513 }
aoqi@0 514 }
aoqi@0 515
aoqi@0 516 // baseline current memory snapshot
aoqi@0 517 bool MemTracker::baseline() {
aoqi@0 518 MutexLocker lock(_query_lock);
aoqi@0 519 MemSnapshot* snapshot = get_snapshot();
aoqi@0 520 if (snapshot != NULL) {
aoqi@0 521 return _baseline.baseline(*snapshot, false);
aoqi@0 522 }
aoqi@0 523 return false;
aoqi@0 524 }
aoqi@0 525
aoqi@0 526 // print memory usage from current snapshot
aoqi@0 527 bool MemTracker::print_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
aoqi@0 528 MemBaseline baseline;
aoqi@0 529 MutexLocker lock(_query_lock);
aoqi@0 530 MemSnapshot* snapshot = get_snapshot();
aoqi@0 531 if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
aoqi@0 532 BaselineReporter reporter(out, unit);
aoqi@0 533 reporter.report_baseline(baseline, summary_only);
aoqi@0 534 return true;
aoqi@0 535 }
aoqi@0 536 return false;
aoqi@0 537 }
aoqi@0 538
// Whitebox API for blocking until the current generation of NMT data has been merged
bool MemTracker::wbtest_wait_for_data_merge() {
  // NMT can't be shutdown while we're holding _query_lock
  MutexLocker lock(_query_lock);
  assert(_worker_thread != NULL, "Invalid query");
  // the generation at query time, so NMT will spin till this generation is processed
  unsigned long generation_at_query_time = SequenceGenerator::current_generation();
  unsigned long current_processing_generation = _processing_generation;
  // if generation counter overflown
  bool generation_overflown = (generation_at_query_time < current_processing_generation);
  long generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
  // spin until the worker has processed past our generation, or NMT shuts down
  while (!shutdown_in_progress()) {
    if (!generation_overflown) {
      // normal case: done once the worker moves past the query generation
      if (current_processing_generation > generation_at_query_time) {
        return true;
      }
    } else {
      // wraparound case: the counter must first wrap (distance-to-wrap grows)
      // and then move past the query generation
      assert(generations_to_wrap >= 0, "Sanity check");
      long current_generations_to_wrap = MAX_UNSIGNED_LONG - current_processing_generation;
      assert(current_generations_to_wrap >= 0, "Sanity check");
      // to overflow an unsigned long should take long time, so to_wrap check should be sufficient
      if (current_generations_to_wrap > generations_to_wrap &&
          current_processing_generation > generation_at_query_time) {
        return true;
      }
    }

    // if worker thread is idle, but generation is not advancing, that means
    // there is no safepoint to let NMT advance generation, force one.
    if (_worker_thread_idle) {
      VM_ForceSafepoint vfs;
      VMThread::execute(&vfs);
    }
    MemSnapshot* snapshot = get_snapshot();
    if (snapshot == NULL) {
      // snapshot already freed: NMT is going away
      return false;
    }
    // wait (up to 1s) for the worker to finish a merge pass, then re-sample
    snapshot->wait(1000);
    current_processing_generation = _processing_generation;
  }
  // We end up here if NMT is shutting down before our data has been merged
  return false;
}
aoqi@0 583
aoqi@0 584 // compare memory usage between current snapshot and baseline
aoqi@0 585 bool MemTracker::compare_memory_usage(BaselineOutputer& out, size_t unit, bool summary_only) {
aoqi@0 586 MutexLocker lock(_query_lock);
aoqi@0 587 if (_baseline.baselined()) {
aoqi@0 588 MemBaseline baseline;
aoqi@0 589 MemSnapshot* snapshot = get_snapshot();
aoqi@0 590 if (snapshot != NULL && baseline.baseline(*snapshot, summary_only)) {
aoqi@0 591 BaselineReporter reporter(out, unit);
aoqi@0 592 reporter.diff_baselines(baseline, _baseline, summary_only);
aoqi@0 593 return true;
aoqi@0 594 }
aoqi@0 595 }
aoqi@0 596 return false;
aoqi@0 597 }
aoqi@0 598
aoqi@0 599 #ifndef PRODUCT
// Walk the native stack starting toSkip frames above the caller, writing
// one "function-name\n" line per resolved frame into buf (at most len
// bytes, always NUL-terminated). Stops at the first frame that can not be
// resolved to a function name. Non-product debugging aid.
void MemTracker::walk_stack(int toSkip, char* buf, int len) {
  int cur_len = 0;
  char tmp[1024];
  address pc;

  while (cur_len < len) {
    pc = os::get_caller_pc(toSkip + 1);
    if (pc != NULL && os::dll_address_to_function_name(pc, tmp, sizeof(tmp), NULL)) {
      // append this frame and re-measure the accumulated length
      jio_snprintf(&buf[cur_len], (len - cur_len), "%s\n", tmp);
      cur_len = (int)strlen(buf);
    } else {
      // unresolvable frame (or end of stack): terminate the string and stop
      buf[cur_len] = '\0';
      break;
    }
    toSkip ++;
  }
}
aoqi@0 617
aoqi@0 618 void MemTracker::print_tracker_stats(outputStream* st) {
aoqi@0 619 st->print_cr("\nMemory Tracker Stats:");
aoqi@0 620 st->print_cr("\tMax sequence number = %d", SequenceGenerator::max_seq_num());
aoqi@0 621 st->print_cr("\tthead count = %d", _thread_count);
aoqi@0 622 st->print_cr("\tArena instance = %d", Arena::_instance_count);
aoqi@0 623 st->print_cr("\tpooled recorder count = %d", _pooled_recorder_count);
aoqi@0 624 st->print_cr("\tqueued recorder count = %d", _pending_recorder_count);
aoqi@0 625 st->print_cr("\tmemory recorder instance count = %d", MemRecorder::_instance_count);
aoqi@0 626 if (_worker_thread != NULL) {
aoqi@0 627 st->print_cr("\tWorker thread:");
aoqi@0 628 st->print_cr("\t\tSync point count = %d", _worker_thread->_sync_point_count);
aoqi@0 629 st->print_cr("\t\tpending recorder count = %d", _worker_thread->count_pending_recorders());
aoqi@0 630 st->print_cr("\t\tmerge count = %d", _worker_thread->_merge_count);
aoqi@0 631 } else {
aoqi@0 632 st->print_cr("\tWorker thread is not started");
aoqi@0 633 }
aoqi@0 634 st->print_cr(" ");
aoqi@0 635
aoqi@0 636 if (_snapshot != NULL) {
aoqi@0 637 _snapshot->print_snapshot_stats(st);
aoqi@0 638 } else {
aoqi@0 639 st->print_cr("No snapshot");
aoqi@0 640 }
aoqi@0 641 }
aoqi@0 642 #endif
aoqi@0 643
aoqi@0 644
aoqi@0 645 // Tracker Implementation
aoqi@0 646
aoqi@0 647 /*
aoqi@0 648 * Create a tracker.
aoqi@0 649 * This is a fairly complicated constructor, as it has to make two important decisions:
aoqi@0 650 * 1) Does it need to take ThreadCritical lock to write tracking record
aoqi@0 651 * 2) Does it need to pre-reserve a sequence number for the tracking record
aoqi@0 652 *
aoqi@0 653 * The rules to determine if ThreadCritical is needed:
aoqi@0 654 * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM
aoqi@0 655 * still in single thread mode.
aoqi@0 656 * 2. For all threads other than JavaThread, ThreadCritical is needed
aoqi@0 657 * to write to recorders to global recorder.
aoqi@0 658 * 3. For JavaThreads that are no longer visible by safepoint, also
aoqi@0 659 * need to take ThreadCritical and records are written to global
aoqi@0 660 * recorders, since these threads are NOT walked by Threads.do_thread().
aoqi@0 661 * 4. JavaThreads that are running in safepoint-safe states do not stop
aoqi@0 662 * for safepoints, ThreadCritical lock should be taken to write
aoqi@0 663 * memory records.
aoqi@0 664 * 5. JavaThreads that are running in VM state do not need any lock and
aoqi@0 665 * records are written to per-thread recorders.
aoqi@0 666 * 6. For a thread has yet to attach VM 'Thread', they need to take
aoqi@0 667 * ThreadCritical to write to global recorder.
aoqi@0 668 *
aoqi@0 669 * The memory operations that need pre-reserve sequence numbers:
aoqi@0 670 * The memory operations that "release" memory blocks and the
aoqi@0 671 * operations can fail, need to pre-reserve sequence number. They
aoqi@0 672 * are realloc, uncommit and release.
aoqi@0 673 *
aoqi@0 674 * The reason for pre-reserve sequence number, is to prevent race condition:
aoqi@0 675 * Thread 1 Thread 2
aoqi@0 676 * <release>
aoqi@0 677 * <allocate>
aoqi@0 678 * <write allocate record>
aoqi@0 679 * <write release record>
aoqi@0 680 * if Thread 2 happens to obtain the memory address Thread 1 just released,
aoqi@0 681 * then NMT can mistakenly report the memory is free.
aoqi@0 682 *
 * Noticeably, free() does not need to pre-reserve a sequence number, because the call
 * does not fail, so we can always write the "release" record before the memory is actually
 * freed.
aoqi@0 686 *
aoqi@0 687 * For realloc, uncommit and release, following coding pattern should be used:
aoqi@0 688 *
aoqi@0 689 * MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
aoqi@0 690 * ptr = ::realloc(...);
aoqi@0 691 * if (ptr == NULL) {
aoqi@0 692 * tkr.record(...)
aoqi@0 693 * } else {
aoqi@0 694 * tkr.discard();
aoqi@0 695 * }
aoqi@0 696 *
aoqi@0 697 * MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
aoqi@0 698 * if (uncommit(...)) {
aoqi@0 699 * tkr.record(...);
aoqi@0 700 * } else {
aoqi@0 701 * tkr.discard();
aoqi@0 702 * }
aoqi@0 703 *
aoqi@0 704 * MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
aoqi@0 705 * if (release(...)) {
aoqi@0 706 * tkr.record(...);
aoqi@0 707 * } else {
aoqi@0 708 * tkr.discard();
aoqi@0 709 * }
aoqi@0 710 *
aoqi@0 711 * Since pre-reserved sequence number is only good for the generation that it is acquired,
aoqi@0 712 * when there is pending Tracker that reserved sequence number, NMT sync-point has
aoqi@0 713 * to be skipped to prevent from advancing generation. This is done by inc and dec
aoqi@0 714 * MemTracker::_pending_op_count, when MemTracker::_pending_op_count > 0, NMT sync-point is skipped.
aoqi@0 715 * Not all pre-reservation of sequence number will increment pending op count. For JavaThreads
aoqi@0 716 * that honor safepoints, safepoint can not occur during the memory operations, so the
 * pre-reserved sequence number won't cross the generation boundary.
aoqi@0 718 */
// Tracker constructor: decides (1) whether writing this operation requires
// the ThreadCritical lock, and (2) whether a sequence number must be
// pre-reserved (Realloc/Uncommit/Release). See the block comment above for
// the full rules.
MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) {
  _op = NoOp;
  _seq = 0;
  if (MemTracker::is_on()) {
    _java_thread = NULL;
    _op = op;

    // figure out if ThreadCritical lock is needed to write this operation
    // to MemTracker
    if (MemTracker::is_single_threaded_bootstrap()) {
      // VM is still single-threaded: no contention possible
      thr = NULL;
    } else if (thr == NULL) {
      // don't use Thread::current(), since it is possible that
      // the calling thread has yet to attach to VM 'Thread',
      // which would result in an assertion failure
      thr = ThreadLocalStorage::thread();
    }

    if (thr != NULL) {
      // Check NMT load
      MemTracker::check_NMT_load(thr);

      if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) {
        _java_thread = (JavaThread*)thr;
        JavaThreadState state = _java_thread->thread_state();
        // JavaThreads that are safepoint safe, can run through safepoint,
        // so ThreadCritical is needed to ensure no threads at safepoint create
        // new records while the records are being gathered and the sequence number is changing
        _need_thread_critical_lock =
          SafepointSynchronize::safepoint_safe(_java_thread, state);
      } else {
        // non-Java threads and safepoint-invisible JavaThreads write to the
        // global recorder, so they always need the lock
        _need_thread_critical_lock = true;
      }
    } else {
      // no attached thread: lock unless still in single-threaded bootstrap
      _need_thread_critical_lock
        = !MemTracker::is_single_threaded_bootstrap();
    }

    // see if we need to pre-reserve sequence number for this operation
    if (_op == Realloc || _op == Uncommit || _op == Release) {
      if (_need_thread_critical_lock) {
        ThreadCritical tc;
        // bump the pending-op count so the NMT sync point is skipped while
        // this reserved sequence number is outstanding
        MemTracker::inc_pending_op_count();
        _seq = SequenceGenerator::next();
      } else {
        // for the threads that honor safepoints, no safepoint can occur
        // during the lifespan of tracker, so we don't need to increase
        // pending op count.
        _seq = SequenceGenerator::next();
      }
    }
  }
}
aoqi@0 772
aoqi@0 773 void MemTracker::Tracker::discard() {
aoqi@0 774 if (MemTracker::is_on() && _seq != 0) {
aoqi@0 775 if (_need_thread_critical_lock) {
aoqi@0 776 ThreadCritical tc;
aoqi@0 777 MemTracker::dec_pending_op_count();
aoqi@0 778 }
aoqi@0 779 _seq = 0;
aoqi@0 780 }
aoqi@0 781 }
aoqi@0 782
aoqi@0 783
// Record a realloc-style operation: a free record for old_addr followed by a
// malloc record for new_addr. The free record uses the sequence number
// pre-reserved by the constructor (so it sorts before the realloc's own
// allocation), and both records are written back-to-back -- under
// ThreadCritical when required -- so a sync-point cannot observe one
// without the other.
void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size,
  MEMFLAGS flags, address pc) {
  assert(old_addr != NULL && new_addr != NULL, "Sanity check");
  // Only a Realloc tracker may use this two-address form. NoOp is tolerated
  // because the constructor leaves _op == NoOp when NMT is off.
  assert(_op == Realloc || _op == NoOp, "Wrong call");
  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
    // The constructor pre-reserves a sequence number for Realloc operations.
    assert(_seq > 0, "Need pre-reserve sequence number");
    if (_need_thread_critical_lock) {
      ThreadCritical tc;
      // free old address, use pre-reserved sequence number
      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
        0, _seq, pc, _java_thread);
      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
        size, SequenceGenerator::next(), pc, _java_thread);
      // decrement MemTracker pending_op_count
      MemTracker::dec_pending_op_count();
    } else {
      // free old address, use pre-reserved sequence number
      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
        0, _seq, pc, _java_thread);
      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
        size, SequenceGenerator::next(), pc, _java_thread);
    }
    // Reservation consumed; a later discard() must not decrement the
    // pending op count again.
    _seq = 0;
  }
}
aoqi@0 809
aoqi@0 810 void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) {
aoqi@0 811 // OOM already?
aoqi@0 812 if (addr == NULL) return;
aoqi@0 813
aoqi@0 814 if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
aoqi@0 815 bool pre_reserved_seq = (_seq != 0);
aoqi@0 816 address pc = CALLER_CALLER_PC;
aoqi@0 817 MEMFLAGS orig_flags = flags;
aoqi@0 818
aoqi@0 819 // or the tagging flags
aoqi@0 820 switch(_op) {
aoqi@0 821 case Malloc:
aoqi@0 822 flags |= MemPointerRecord::malloc_tag();
aoqi@0 823 break;
aoqi@0 824 case Free:
aoqi@0 825 flags = MemPointerRecord::free_tag();
aoqi@0 826 break;
aoqi@0 827 case Realloc:
aoqi@0 828 fatal("Use the other Tracker::record()");
aoqi@0 829 break;
aoqi@0 830 case Reserve:
aoqi@0 831 case ReserveAndCommit:
aoqi@0 832 flags |= MemPointerRecord::virtual_memory_reserve_tag();
aoqi@0 833 break;
aoqi@0 834 case Commit:
aoqi@0 835 flags = MemPointerRecord::virtual_memory_commit_tag();
aoqi@0 836 break;
aoqi@0 837 case Type:
aoqi@0 838 flags |= MemPointerRecord::virtual_memory_type_tag();
aoqi@0 839 break;
aoqi@0 840 case Uncommit:
aoqi@0 841 assert(pre_reserved_seq, "Need pre-reserve sequence number");
aoqi@0 842 flags = MemPointerRecord::virtual_memory_uncommit_tag();
aoqi@0 843 break;
aoqi@0 844 case Release:
aoqi@0 845 assert(pre_reserved_seq, "Need pre-reserve sequence number");
aoqi@0 846 flags = MemPointerRecord::virtual_memory_release_tag();
aoqi@0 847 break;
aoqi@0 848 case ArenaSize:
aoqi@0 849 // a bit of hack here, add a small postive offset to arena
aoqi@0 850 // address for its size record, so the size record is sorted
aoqi@0 851 // right after arena record.
aoqi@0 852 flags = MemPointerRecord::arena_size_tag();
aoqi@0 853 addr += sizeof(void*);
aoqi@0 854 break;
aoqi@0 855 case StackRelease:
aoqi@0 856 flags = MemPointerRecord::virtual_memory_release_tag();
aoqi@0 857 break;
aoqi@0 858 default:
aoqi@0 859 ShouldNotReachHere();
aoqi@0 860 }
aoqi@0 861
aoqi@0 862 // write memory tracking record
aoqi@0 863 if (_need_thread_critical_lock) {
aoqi@0 864 ThreadCritical tc;
aoqi@0 865 if (_seq == 0) _seq = SequenceGenerator::next();
aoqi@0 866 MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
aoqi@0 867 if (_op == ReserveAndCommit) {
aoqi@0 868 MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
aoqi@0 869 size, SequenceGenerator::next(), pc, _java_thread);
aoqi@0 870 }
aoqi@0 871 if (pre_reserved_seq) MemTracker::dec_pending_op_count();
aoqi@0 872 } else {
aoqi@0 873 if (_seq == 0) _seq = SequenceGenerator::next();
aoqi@0 874 MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
aoqi@0 875 if (_op == ReserveAndCommit) {
aoqi@0 876 MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
aoqi@0 877 size, SequenceGenerator::next(), pc, _java_thread);
aoqi@0 878 }
aoqi@0 879 }
aoqi@0 880 _seq = 0;
aoqi@0 881 }
aoqi@0 882 }
aoqi@0 883

mercurial