src/share/vm/jfr/recorder/storage/jfrStorage.cpp

Wed, 17 Jun 2020 11:43:05 +0300

author
apetushkov
date
Wed, 17 Jun 2020 11:43:05 +0300
changeset 9928
d2c2cd90513e
parent 9858
b985cbb00e68
permissions
-rw-r--r--

8220293: Deadlock in JFR string pool
Reviewed-by: rehn, egahlin

apetushkov@9858 1 /*
apetushkov@9858 2 * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
apetushkov@9858 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
apetushkov@9858 4 *
apetushkov@9858 5 * This code is free software; you can redistribute it and/or modify it
apetushkov@9858 6 * under the terms of the GNU General Public License version 2 only, as
apetushkov@9858 7 * published by the Free Software Foundation.
apetushkov@9858 8 *
apetushkov@9858 9 * This code is distributed in the hope that it will be useful, but WITHOUT
apetushkov@9858 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
apetushkov@9858 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
apetushkov@9858 12 * version 2 for more details (a copy is included in the LICENSE file that
apetushkov@9858 13 * accompanied this code).
apetushkov@9858 14 *
apetushkov@9858 15 * You should have received a copy of the GNU General Public License version
apetushkov@9858 16 * 2 along with this work; if not, write to the Free Software Foundation,
apetushkov@9858 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
apetushkov@9858 18 *
apetushkov@9858 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
apetushkov@9858 20 * or visit www.oracle.com if you need additional information or have any
apetushkov@9858 21 * questions.
apetushkov@9858 22 *
apetushkov@9858 23 */
apetushkov@9858 24
apetushkov@9858 25 #include "precompiled.hpp"
apetushkov@9858 26 #include "jfr/jfrEvents.hpp"
apetushkov@9858 27 #include "jfr/jni/jfrJavaSupport.hpp"
apetushkov@9858 28 #include "jfr/recorder/jfrRecorder.hpp"
apetushkov@9858 29 #include "jfr/recorder/repository/jfrChunkWriter.hpp"
apetushkov@9858 30 #include "jfr/recorder/service/jfrOptionSet.hpp"
apetushkov@9858 31 #include "jfr/recorder/service/jfrPostBox.hpp"
apetushkov@9858 32 #include "jfr/recorder/storage/jfrMemorySpace.inline.hpp"
apetushkov@9858 33 #include "jfr/recorder/storage/jfrStorage.hpp"
apetushkov@9858 34 #include "jfr/recorder/storage/jfrStorageControl.hpp"
apetushkov@9858 35 #include "jfr/recorder/storage/jfrStorageUtils.inline.hpp"
apetushkov@9858 36 #include "jfr/utilities/jfrIterator.hpp"
apetushkov@9858 37 #include "jfr/utilities/jfrTime.hpp"
apetushkov@9858 38 #include "jfr/writers/jfrNativeEventWriter.hpp"
apetushkov@9858 39 #include "runtime/mutexLocker.hpp"
apetushkov@9858 40 #include "runtime/orderAccess.hpp"
apetushkov@9858 41 #include "runtime/os.hpp"
apetushkov@9858 42 #include "runtime/safepoint.hpp"
apetushkov@9858 43 #include "runtime/thread.hpp"
apetushkov@9858 44
// Shorthand for pointers to storage buffers, used throughout this file.
typedef JfrStorage::Buffer* BufferPtr;

// File-local singleton state: the storage instance and its control object.
static JfrStorage* _instance = NULL;
static JfrStorageControl* _control;
apetushkov@9858 49
// Returns the singleton; create() must have completed successfully before use.
JfrStorage& JfrStorage::instance() {
  return *_instance;
}
apetushkov@9858 53
apetushkov@9858 54 JfrStorage* JfrStorage::create(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) {
apetushkov@9858 55 assert(_instance == NULL, "invariant");
apetushkov@9858 56 _instance = new JfrStorage(chunkwriter, post_box);
apetushkov@9858 57 return _instance;
apetushkov@9858 58 }
apetushkov@9858 59
apetushkov@9858 60 void JfrStorage::destroy() {
apetushkov@9858 61 if (_instance != NULL) {
apetushkov@9858 62 delete _instance;
apetushkov@9858 63 _instance = NULL;
apetushkov@9858 64 }
apetushkov@9858 65 }
apetushkov@9858 66
// The memory spaces are set up lazily in initialize(); the constructor
// only records collaborators and zeroes the owned pointers.
JfrStorage::JfrStorage(JfrChunkWriter& chunkwriter, JfrPostBox& post_box) :
  _control(NULL),
  _global_mspace(NULL),
  _thread_local_mspace(NULL),
  _transient_mspace(NULL),
  _age_mspace(NULL),
  _chunkwriter(chunkwriter),
  _post_box(post_box) {}
apetushkov@9858 75
apetushkov@9858 76 JfrStorage::~JfrStorage() {
apetushkov@9858 77 if (_control != NULL) {
apetushkov@9858 78 delete _control;
apetushkov@9858 79 }
apetushkov@9858 80 if (_global_mspace != NULL) {
apetushkov@9858 81 delete _global_mspace;
apetushkov@9858 82 }
apetushkov@9858 83 if (_thread_local_mspace != NULL) {
apetushkov@9858 84 delete _thread_local_mspace;
apetushkov@9858 85 }
apetushkov@9858 86 if (_transient_mspace != NULL) {
apetushkov@9858 87 delete _transient_mspace;
apetushkov@9858 88 }
apetushkov@9858 89 if (_age_mspace != NULL) {
apetushkov@9858 90 delete _age_mspace;
apetushkov@9858 91 }
apetushkov@9858 92 _instance = NULL;
apetushkov@9858 93 }
apetushkov@9858 94
static const size_t in_memory_discard_threshold_delta = 2; // start discarding data when only this many free buffers remain
static const size_t unlimited_mspace_size = 0;             // 0 means no cap on total mspace size
static const size_t thread_local_cache_count = 8;          // buffers cached for thread-local use
static const size_t thread_local_scavenge_threshold = thread_local_cache_count / 2; // dead-buffer count that triggers a scavenge
static const size_t transient_buffer_size_multiplier = 8;  // relative to the thread local buffer size
apetushkov@9858 100
apetushkov@9858 101 template <typename Mspace>
apetushkov@9858 102 static Mspace* create_mspace(size_t buffer_size, size_t limit, size_t cache_count, JfrStorage* storage_instance) {
apetushkov@9858 103 Mspace* mspace = new Mspace(buffer_size, limit, cache_count, storage_instance);
apetushkov@9858 104 if (mspace != NULL) {
apetushkov@9858 105 mspace->initialize();
apetushkov@9858 106 }
apetushkov@9858 107 return mspace;
apetushkov@9858 108 }
apetushkov@9858 109
// Builds the runtime storage topology: the control object plus four memory
// spaces - global (shared event buffers), thread-local, transient (oversized
// short-lived leases) and age (bookkeeping nodes for retired/full buffers).
// Returns false on the first allocation failure; the destructor tolerates
// the partially-constructed state left behind.
bool JfrStorage::initialize() {
  assert(_control == NULL, "invariant");
  assert(_global_mspace == NULL, "invariant");
  assert(_thread_local_mspace == NULL, "invariant");
  assert(_transient_mspace == NULL, "invariant");
  assert(_age_mspace == NULL, "invariant");

  const size_t num_global_buffers = (size_t)JfrOptionSet::num_global_buffers();
  assert(num_global_buffers >= in_memory_discard_threshold_delta, "invariant");
  const size_t memory_size = (size_t)JfrOptionSet::memory_size();
  const size_t global_buffer_size = (size_t)JfrOptionSet::global_buffer_size();
  const size_t thread_buffer_size = (size_t)JfrOptionSet::thread_buffer_size();

  // discard threshold = total buffer count minus the reserved delta
  _control = new JfrStorageControl(num_global_buffers, num_global_buffers - in_memory_discard_threshold_delta);
  if (_control == NULL) {
    return false;
  }
  _global_mspace = create_mspace<JfrStorageMspace>(global_buffer_size, memory_size, num_global_buffers, this);
  if (_global_mspace == NULL) {
    return false;
  }
  _thread_local_mspace = create_mspace<JfrThreadLocalMspace>(thread_buffer_size, unlimited_mspace_size, thread_local_cache_count, this);
  if (_thread_local_mspace == NULL) {
    return false;
  }
  // transient buffers are larger than thread-local ones and are not cached (cache_count == 0)
  _transient_mspace = create_mspace<JfrStorageMspace>(thread_buffer_size * transient_buffer_size_multiplier, unlimited_mspace_size, 0, this);
  if (_transient_mspace == NULL) {
    return false;
  }
  _age_mspace = create_mspace<JfrStorageAgeMspace>(0 /* no extra size except header */, unlimited_mspace_size, num_global_buffers, this);
  if (_age_mspace == NULL) {
    return false;
  }
  control().set_scavenge_threshold(thread_local_scavenge_threshold);
  return true;
}
apetushkov@9858 146
// Accessor for the storage control object; valid only after initialize().
JfrStorageControl& JfrStorage::control() {
  return *instance()._control;
}
apetushkov@9858 150
// Logs a failed buffer allocation of the given kind and size (when LogJFR is on).
static void log_allocation_failure(const char* msg, size_t size) {
  if (LogJFR) tty->print_cr("Unable to allocate " SIZE_FORMAT " bytes of %s.", size, msg);
}
apetushkov@9858 154
apetushkov@9858 155 BufferPtr JfrStorage::acquire_thread_local(Thread* thread, size_t size /* 0 */) {
apetushkov@9858 156 BufferPtr buffer = mspace_get_to_full(size, instance()._thread_local_mspace, thread);
apetushkov@9858 157 if (buffer == NULL) {
apetushkov@9858 158 log_allocation_failure("thread local_memory", size);
apetushkov@9858 159 return NULL;
apetushkov@9858 160 }
apetushkov@9858 161 assert(buffer->acquired_by_self(), "invariant");
apetushkov@9858 162 return buffer;
apetushkov@9858 163 }
apetushkov@9858 164
apetushkov@9858 165 BufferPtr JfrStorage::acquire_transient(size_t size, Thread* thread) {
apetushkov@9858 166 BufferPtr buffer = mspace_allocate_transient_lease_to_full(size, instance()._transient_mspace, thread);
apetushkov@9858 167 if (buffer == NULL) {
apetushkov@9858 168 log_allocation_failure("transient memory", size);
apetushkov@9858 169 return NULL;
apetushkov@9858 170 }
apetushkov@9858 171 assert(buffer->acquired_by_self(), "invariant");
apetushkov@9858 172 assert(buffer->transient(), "invariant");
apetushkov@9858 173 assert(buffer->lease(), "invariant");
apetushkov@9858 174 return buffer;
apetushkov@9858 175 }
apetushkov@9858 176
apetushkov@9858 177 static BufferPtr get_lease(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
apetushkov@9858 178 assert(size <= mspace->min_elem_size(), "invariant");
apetushkov@9858 179 while (true) {
apetushkov@9858 180 BufferPtr t = mspace_get_free_lease_with_retry(size, mspace, retry_count, thread);
apetushkov@9858 181 if (t == NULL && storage_instance.control().should_discard()) {
apetushkov@9858 182 storage_instance.discard_oldest(thread);
apetushkov@9858 183 continue;
apetushkov@9858 184 }
apetushkov@9858 185 return t;
apetushkov@9858 186 }
apetushkov@9858 187 }
apetushkov@9858 188
apetushkov@9858 189 static BufferPtr get_promotion_buffer(size_t size, JfrStorageMspace* mspace, JfrStorage& storage_instance, size_t retry_count, Thread* thread) {
apetushkov@9858 190 assert(size <= mspace->min_elem_size(), "invariant");
apetushkov@9858 191 while (true) {
apetushkov@9858 192 BufferPtr t = mspace_get_free_with_retry(size, mspace, retry_count, thread);
apetushkov@9858 193 if (t == NULL && storage_instance.control().should_discard()) {
apetushkov@9858 194 storage_instance.discard_oldest(thread);
apetushkov@9858 195 continue;
apetushkov@9858 196 }
apetushkov@9858 197 return t;
apetushkov@9858 198 }
apetushkov@9858 199 }
apetushkov@9858 200
static const size_t lease_retry = 10; // max attempts when leasing a free global buffer
apetushkov@9858 202
apetushkov@9858 203 BufferPtr JfrStorage::acquire_large(size_t size, Thread* thread) {
apetushkov@9858 204 JfrStorage& storage_instance = instance();
apetushkov@9858 205 const size_t max_elem_size = storage_instance._global_mspace->min_elem_size(); // min is also max
apetushkov@9858 206 // if not too large and capacity is still available, ask for a lease from the global system
apetushkov@9858 207 if (size < max_elem_size && storage_instance.control().is_global_lease_allowed()) {
apetushkov@9858 208 BufferPtr const buffer = get_lease(size, storage_instance._global_mspace, storage_instance, lease_retry, thread);
apetushkov@9858 209 if (buffer != NULL) {
apetushkov@9858 210 assert(buffer->acquired_by_self(), "invariant");
apetushkov@9858 211 assert(!buffer->transient(), "invariant");
apetushkov@9858 212 assert(buffer->lease(), "invariant");
apetushkov@9858 213 storage_instance.control().increment_leased();
apetushkov@9858 214 return buffer;
apetushkov@9858 215 }
apetushkov@9858 216 }
apetushkov@9858 217 return acquire_transient(size, thread);
apetushkov@9858 218 }
apetushkov@9858 219
// Writes an EventDataLoss record into the (now empty) buffer.
// The per-thread lost-byte total is updated unconditionally, even when
// the event itself is disabled.
static void write_data_loss_event(JfrBuffer* buffer, u8 unflushed_size, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->empty(), "invariant");
  const u8 total_data_loss = thread->jfr_thread_local()->add_data_lost(unflushed_size);
  if (EventDataLoss::is_enabled()) {
    JfrNativeEventWriter writer(buffer, thread);
    // field order defines the serialized event layout - do not reorder
    writer.write<u8>(EventDataLoss::eventId);
    writer.write(JfrTicks::now());
    writer.write(unflushed_size);
    writer.write(total_data_loss);
  }
}
apetushkov@9858 232
apetushkov@9858 233 static void write_data_loss(BufferPtr buffer, Thread* thread) {
apetushkov@9858 234 assert(buffer != NULL, "invariant");
apetushkov@9858 235 const size_t unflushed_size = buffer->unflushed_size();
apetushkov@9858 236 buffer->concurrent_reinitialization();
apetushkov@9858 237 if (unflushed_size == 0) {
apetushkov@9858 238 return;
apetushkov@9858 239 }
apetushkov@9858 240 write_data_loss_event(buffer, unflushed_size, thread);
apetushkov@9858 241 }
apetushkov@9858 242
static const size_t promotion_retry = 100; // max attempts when acquiring a promotion buffer
apetushkov@9858 244
// Promotes the unflushed content of a regular (thread-stable) buffer into a
// global promotion buffer and reinitializes it. Returns true on success;
// on failure the data is dropped (an EventDataLoss is emitted) and false returned.
bool JfrStorage::flush_regular_buffer(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  if (unflushed_size == 0) {
    // nothing to promote; just reset the buffer
    buffer->concurrent_reinitialization();
    assert(buffer->empty(), "invariant");
    return true;
  }
  BufferPtr const promotion_buffer = get_promotion_buffer(unflushed_size, _global_mspace, *this, promotion_retry, thread);
  if (promotion_buffer == NULL) {
    write_data_loss(buffer, thread);
    return false;
  }
  // NOTE(review): the acquired_by_self assert is deliberately skipped during
  // shutdown - presumably another (shutdown) thread may hold the buffer; confirm.
  if (!JfrRecorder::is_shutting_down()) {
    assert(promotion_buffer->acquired_by_self(), "invariant");
  }
  assert(promotion_buffer->free_size() >= unflushed_size, "invariant");
  buffer->concurrent_move_and_reinitialize(promotion_buffer, unflushed_size);
  assert(buffer->empty(), "invariant");
  return true;
}
apetushkov@9858 268
apetushkov@9858 269 /*
apetushkov@9858 270 * 1. If the buffer was a "lease" from the global system, release back.
apetushkov@9858 271 * 2. If the buffer is transient (temporal dynamically allocated), retire and register full.
apetushkov@9858 272 *
apetushkov@9858 273 * The buffer is effectively invalidated for the thread post-return,
apetushkov@9858 274 * and the caller should take means to ensure that it is not referenced any longer.
apetushkov@9858 275 */
void JfrStorage::release_large(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->lease(), "invariant");
  assert(buffer->acquired_by_self(), "invariant");
  buffer->clear_lease();
  if (buffer->transient()) {
    // dynamically allocated: retire and hand over to the full list for writing + reclamation
    buffer->set_retired();
    register_full(buffer, thread);
  } else {
    // leased from the global mspace: hand it back and release the lease quota
    buffer->release();
    control().decrement_leased();
  }
}
apetushkov@9858 289
// Allocates a fresh transient age node from the age mspace.
// NOTE(review): the buffer parameter is only asserted, never used - the
// caller attaches the buffer via set_retired_buffer(); confirm before removal.
static JfrAgeNode* new_age_node(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(age_mspace != NULL, "invariant");
  return mspace_allocate_transient(0, age_mspace, thread);
}
apetushkov@9858 295
// Logs that a full buffer could not be registered and that its content was dropped.
static void log_registration_failure(size_t unflushed_size) {
  if (LogJFR) tty->print_cr("Unable to register a full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
  if (LogJFR) tty->print_cr("Cleared 1 full buffer of " SIZE_FORMAT " bytes.", unflushed_size);
}
apetushkov@9858 300
// Registration failed: drop the buffer's unflushed data by reinitializing it,
// and log how many bytes were lost.
static void handle_registration_failure(BufferPtr buffer) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  const size_t unflushed_size = buffer->unflushed_size();
  buffer->reinitialize();
  log_registration_failure(unflushed_size);
}
apetushkov@9858 308
// Fetches a free age node from the age mspace. Caller must hold JfrBuffer_lock.
static JfrAgeNode* get_free_age_node(JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  return mspace_get_free_with_detach(0, age_mspace, thread);
}
apetushkov@9858 313
// Links an age node (carrying a retired buffer) onto the head of the full list.
// Caller must hold JfrBuffer_lock. Always succeeds.
static bool insert_full_age_node(JfrAgeNode* age_node, JfrStorageAgeMspace* age_mspace, Thread* thread) {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  assert(age_node->retired_buffer()->retired(), "invariant");
  age_mspace->insert_full_head(age_node);
  return true;
}
apetushkov@9858 320
apetushkov@9858 321 static bool full_buffer_registration(BufferPtr buffer, JfrStorageAgeMspace* age_mspace, JfrStorageControl& control, Thread* thread) {
apetushkov@9858 322 assert(buffer != NULL, "invariant");
apetushkov@9858 323 assert(buffer->retired(), "invariant");
apetushkov@9858 324 assert(age_mspace != NULL, "invariant");
apetushkov@9858 325 MutexLockerEx lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
apetushkov@9858 326 JfrAgeNode* age_node = get_free_age_node(age_mspace, thread);
apetushkov@9858 327 if (age_node == NULL) {
apetushkov@9858 328 age_node = new_age_node(buffer, age_mspace, thread);
apetushkov@9858 329 if (age_node == NULL) {
apetushkov@9858 330 return false;
apetushkov@9858 331 }
apetushkov@9858 332 }
apetushkov@9858 333 assert(age_node->acquired_by_self(), "invariant");
apetushkov@9858 334 assert(age_node != NULL, "invariant");
apetushkov@9858 335 age_node->set_retired_buffer(buffer);
apetushkov@9858 336 control.increment_full();
apetushkov@9858 337 return insert_full_age_node(age_node, age_mspace, thread);
apetushkov@9858 338 }
apetushkov@9858 339
// Registers a retired buffer on the full list and, when the policy says so,
// notifies the recorder thread via MSG_FULLBUFFER.
// On registration failure the buffer content is dropped (and logged).
void JfrStorage::register_full(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(buffer->retired(), "invariant");
  assert(buffer->acquired_by(thread), "invariant");
  if (!full_buffer_registration(buffer, _age_mspace, control(), thread)) {
    handle_registration_failure(buffer);
  }
  if (control().should_post_buffer_full_message()) {
    _post_box.post(MSG_FULLBUFFER);
  }
}
apetushkov@9858 351
// Acquires JfrBuffer_lock without a safepoint check; must not already be held.
void JfrStorage::lock() {
  assert(!JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->lock_without_safepoint_check();
}
apetushkov@9858 356
// Releases JfrBuffer_lock; the calling thread must hold it.
void JfrStorage::unlock() {
  assert(JfrBuffer_lock->owned_by_self(), "invariant");
  JfrBuffer_lock->unlock();
}
apetushkov@9858 361
#ifdef ASSERT
// Debug-only: whether the calling thread holds JfrBuffer_lock.
bool JfrStorage::is_locked() const {
  return JfrBuffer_lock->owned_by_self();
}
#endif
apetushkov@9858 367
// Retires a regular buffer on behalf of a (dying) thread.
// Don't use buffer on return, it is gone.
void JfrStorage::release(BufferPtr buffer, Thread* thread) {
  assert(buffer != NULL, "invariant");
  assert(!buffer->lease(), "invariant");
  assert(!buffer->transient(), "invariant");
  assert(!buffer->retired(), "invariant");
  if (!buffer->empty()) {
    if (!flush_regular_buffer(buffer, thread)) {
      // flush failed; the data has already been accounted as lost
      buffer->concurrent_reinitialization();
    }
  }
  assert(buffer->empty(), "invariant");
  assert(buffer->identity() != NULL, "invariant");
  control().increment_dead();
  // mark dead; reclaimed later when a scavenge is triggered (see release_thread_local)
  buffer->set_retired();
}
apetushkov@9858 384
apetushkov@9858 385 void JfrStorage::release_thread_local(BufferPtr buffer, Thread* thread) {
apetushkov@9858 386 assert(buffer != NULL, "invariant");
apetushkov@9858 387 JfrStorage& storage_instance = instance();
apetushkov@9858 388 storage_instance.release(buffer, thread);
apetushkov@9858 389 if (storage_instance.control().should_scavenge()) {
apetushkov@9858 390 storage_instance._post_box.post(MSG_DEADBUFFER);
apetushkov@9858 391 }
apetushkov@9858 392 }
apetushkov@9858 393
// Logs how many full buffers (and bytes) were discarded, and how many full
// buffers remain (when LogJFR is on).
static void log_discard(size_t count, size_t amount, size_t current) {
  assert(count > 0, "invariant");
  if (LogJFR) tty->print_cr("Cleared " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" bytes.", count, amount);
  if (LogJFR) tty->print_cr("Current number of full buffers " SIZE_FORMAT "", current);
}
apetushkov@9858 399
apetushkov@9858 400 void JfrStorage::discard_oldest(Thread* thread) {
apetushkov@9858 401 if (JfrBuffer_lock->try_lock()) {
apetushkov@9858 402 if (!control().should_discard()) {
apetushkov@9858 403 // another thread handled it
apetushkov@9858 404 return;
apetushkov@9858 405 }
apetushkov@9858 406 const size_t num_full_pre_discard = control().full_count();
apetushkov@9858 407 size_t num_full_post_discard = 0;
apetushkov@9858 408 size_t discarded_size = 0;
apetushkov@9858 409 while (true) {
apetushkov@9858 410 JfrAgeNode* const oldest_age_node = _age_mspace->full_tail();
apetushkov@9858 411 if (oldest_age_node == NULL) {
apetushkov@9858 412 break;
apetushkov@9858 413 }
apetushkov@9858 414 BufferPtr const buffer = oldest_age_node->retired_buffer();
apetushkov@9858 415 assert(buffer->retired(), "invariant");
apetushkov@9858 416 discarded_size += buffer->unflushed_size();
apetushkov@9858 417 num_full_post_discard = control().decrement_full();
apetushkov@9858 418 if (buffer->transient()) {
apetushkov@9858 419 mspace_release_full(buffer, _transient_mspace);
apetushkov@9858 420 mspace_release_full(oldest_age_node, _age_mspace);
apetushkov@9858 421 continue;
apetushkov@9858 422 } else {
apetushkov@9858 423 mspace_release_full(oldest_age_node, _age_mspace);
apetushkov@9858 424 buffer->reinitialize();
apetushkov@9858 425 buffer->release(); // pusb
apetushkov@9858 426 break;
apetushkov@9858 427 }
apetushkov@9858 428 }
apetushkov@9858 429 JfrBuffer_lock->unlock();
apetushkov@9858 430 const size_t number_of_discards = num_full_pre_discard - num_full_post_discard;
apetushkov@9858 431 if (number_of_discards > 0) {
apetushkov@9858 432 log_discard(number_of_discards, discarded_size, num_full_post_discard);
apetushkov@9858 433 }
apetushkov@9858 434 }
apetushkov@9858 435 }
apetushkov@9858 436
#ifdef ASSERT
// NOTE(review): 'const BufferPtr' is 'Buffer* const' (const pointer, mutable
// pointee), not 'const Buffer*'; confirm the name matches the intent.
typedef const BufferPtr ConstBufferPtr;

// The buffer being flushed must be the thread's current native/java buffer
// and the outstanding 'used' bytes must fit between pos() and end().
static void assert_flush_precondition(ConstBufferPtr cur, size_t used, bool native, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->pos() + used <= cur->end(), "invariant");
  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
}

// A regular flush starts from a non-lease buffer with nothing shelved yet.
static void assert_flush_regular_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, const Thread* t) {
  assert(t != NULL, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(!cur->lease(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(req >= used, "invariant");
}

// Provisioning a large buffer requires the regular buffer to be shelved already.
static void assert_provision_large_precondition(ConstBufferPtr cur, size_t used, size_t req, const Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
}

// A large flush operates on a lease buffer distinct from the shelved regular one.
static void assert_flush_large_precondition(ConstBufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  assert(t != NULL, "invariant");
  assert(cur != NULL, "invariant");
  assert(cur->lease(), "invariant");
  assert(cur_pos != NULL, "invariant");
  assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  assert(req >= used, "invariant");
  assert(cur != t->jfr_thread_local()->shelved_buffer(), "invariant");
}
#endif // ASSERT
apetushkov@9858 474
// Entry point for flushing a thread's current buffer when a write of 'req'
// additional bytes does not fit. 'used' bytes beyond pos() are outstanding
// (uncommitted) and must be migrated to whichever buffer is returned.
BufferPtr JfrStorage::flush(BufferPtr cur, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_precondition(cur, used, native, t);)
  const u1* const cur_pos = cur->pos();
  req += used;
  // requested size now encompass the outstanding used size
  return cur->lease() ? instance().flush_large(cur, cur_pos, used, req, native, t) :
                        instance().flush_regular(cur, cur_pos, used, req, native, t);
}
apetushkov@9858 483
// Flushes a regular (thread-stable) buffer and, if the request still does
// not fit, shelves it and provisions a "large" lease instead.
BufferPtr JfrStorage::flush_regular(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_flush_regular_precondition(cur, cur_pos, used, req, t);)
  // A flush is needed before memcpy since a non-large buffer is thread stable
  // (thread local). The flush will not modify memory in addresses above pos()
  // which is where the "used / uncommitted" data resides. It is therefore both
  // possible and valid to migrate data after the flush. This is however only
  // the case for stable thread local buffers; it is not the case for large buffers.
  if (!cur->empty()) {
    flush_regular_buffer(cur, t);
  }
  assert(t->jfr_thread_local()->shelved_buffer() == NULL, "invariant");
  if (cur->free_size() >= req) {
    // simplest case, no switching of buffers
    if (used > 0) {
      // migrate the outstanding (uncommitted) bytes to the new write position
      memcpy(cur->pos(), (void*)cur_pos, used);
    }
    assert(native ? t->jfr_thread_local()->native_buffer() == cur : t->jfr_thread_local()->java_buffer() == cur, "invariant");
    return cur;
  }
  // Going for a "larger-than-regular" buffer.
  // Shelve the current buffer to make room for a temporary lease.
  t->jfr_thread_local()->shelve_buffer(cur);
  return provision_large(cur, cur_pos, used, req, native, t);
}
apetushkov@9858 508
apetushkov@9858 509 static BufferPtr store_buffer_to_thread_local(BufferPtr buffer, JfrThreadLocal* jfr_thread_local, bool native) {
apetushkov@9858 510 assert(buffer != NULL, "invariant");
apetushkov@9858 511 if (native) {
apetushkov@9858 512 jfr_thread_local->set_native_buffer(buffer);
apetushkov@9858 513 } else {
apetushkov@9858 514 jfr_thread_local->set_java_buffer(buffer);
apetushkov@9858 515 }
apetushkov@9858 516 return buffer;
apetushkov@9858 517 }
apetushkov@9858 518
// Unshelves the thread's regular buffer and reinstates it as the current
// (native or java) buffer. A buffer must be shelved when called.
static BufferPtr restore_shelved_buffer(bool native, Thread* t) {
  JfrThreadLocal* const tl = t->jfr_thread_local();
  BufferPtr shelved = tl->shelved_buffer();
  assert(shelved != NULL, "invariant");
  tl->shelve_buffer(NULL);
  // restore shelved buffer back as primary
  return store_buffer_to_thread_local(shelved, tl, native);
}
apetushkov@9858 527
apetushkov@9858 528 BufferPtr JfrStorage::flush_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
apetushkov@9858 529 debug_only(assert_flush_large_precondition(cur, cur_pos, used, req, native, t);)
apetushkov@9858 530 // Can the "regular" buffer (now shelved) accommodate the requested size?
apetushkov@9858 531 BufferPtr shelved = t->jfr_thread_local()->shelved_buffer();
apetushkov@9858 532 assert(shelved != NULL, "invariant");
apetushkov@9858 533 if (shelved->free_size() >= req) {
apetushkov@9858 534 if (req > 0) {
apetushkov@9858 535 memcpy(shelved->pos(), (void*)cur_pos, (size_t)used);
apetushkov@9858 536 }
apetushkov@9858 537 // release and invalidate
apetushkov@9858 538 release_large(cur, t);
apetushkov@9858 539 return restore_shelved_buffer(native, t);
apetushkov@9858 540 }
apetushkov@9858 541 // regular too small
apetushkov@9858 542 return provision_large(cur, cur_pos, used, req, native, t);
apetushkov@9858 543 }
apetushkov@9858 544
// Fallback when a large buffer could not be provisioned: release any lease
// held on 'cur' and reinstate the shelved regular buffer as primary.
static BufferPtr large_fail(BufferPtr cur, bool native, JfrStorage& storage_instance, Thread* t) {
  assert(cur != NULL, "invariant");
  assert(t != NULL, "invariant");
  if (cur->lease()) {
    storage_instance.release_large(cur, t);
  }
  return restore_shelved_buffer(native, t);
}
apetushkov@9858 553
// Always returns a non-null buffer.
// If accommodating the large request fails, the shelved buffer is returned
// even though it might be smaller than the requested size.
// Caller needs to ensure if the size was successfully accommodated.
BufferPtr JfrStorage::provision_large(BufferPtr cur, const u1* const cur_pos, size_t used, size_t req, bool native, Thread* t) {
  debug_only(assert_provision_large_precondition(cur, used, req, t);)
  assert(t->jfr_thread_local()->shelved_buffer() != NULL, "invariant");
  BufferPtr const buffer = acquire_large(req, t);
  if (buffer == NULL) {
    // unable to allocate and serve the request
    return large_fail(cur, native, *this, t);
  }
  // ok managed to acquire a "large" buffer for the requested size
  assert(buffer->free_size() >= req, "invariant");
  assert(buffer->lease(), "invariant");
  // transfer outstanding data (a zero-length copy is fine when used == 0)
  memcpy(buffer->pos(), (void*)cur_pos, used);
  if (cur->lease()) {
    release_large(cur, t);
    // don't use current anymore, it is gone
  }
  return store_buffer_to_thread_local(buffer, t->jfr_thread_local(), native);
}
apetushkov@9858 577
apetushkov@9858 578 typedef UnBufferedWriteToChunk<JfrBuffer> WriteOperation;
apetushkov@9858 579 typedef MutexedWriteOp<WriteOperation> MutexedWriteOperation;
apetushkov@9858 580 typedef ConcurrentWriteOp<WriteOperation> ConcurrentWriteOperation;
apetushkov@9858 581 typedef ConcurrentWriteOpExcludeRetired<WriteOperation> ThreadLocalConcurrentWriteOperation;
apetushkov@9858 582
apetushkov@9858 583 size_t JfrStorage::write() {
apetushkov@9858 584 const size_t full_size_processed = write_full();
apetushkov@9858 585 WriteOperation wo(_chunkwriter);
apetushkov@9858 586 ThreadLocalConcurrentWriteOperation tlwo(wo);
apetushkov@9858 587 process_full_list(tlwo, _thread_local_mspace);
apetushkov@9858 588 ConcurrentWriteOperation cwo(wo);
apetushkov@9858 589 process_free_list(cwo, _global_mspace);
apetushkov@9858 590 return full_size_processed + wo.processed();
apetushkov@9858 591 }
apetushkov@9858 592
apetushkov@9858 593 size_t JfrStorage::write_at_safepoint() {
apetushkov@9858 594 assert(SafepointSynchronize::is_at_safepoint(), "invariant");
apetushkov@9858 595 WriteOperation wo(_chunkwriter);
apetushkov@9858 596 MutexedWriteOperation writer(wo); // mutexed write mode
apetushkov@9858 597 process_full_list(writer, _thread_local_mspace);
apetushkov@9858 598 assert(_transient_mspace->is_free_empty(), "invariant");
apetushkov@9858 599 process_full_list(writer, _transient_mspace);
apetushkov@9858 600 assert(_global_mspace->is_full_empty(), "invariant");
apetushkov@9858 601 process_free_list(writer, _global_mspace);
apetushkov@9858 602 return wo.processed();
apetushkov@9858 603 }
apetushkov@9858 604
apetushkov@9858 605 typedef DiscardOp<DefaultDiscarder<JfrStorage::Buffer> > DiscardOperation;
apetushkov@9858 606 typedef ReleaseOp<JfrStorageMspace> ReleaseOperation;
apetushkov@9858 607 typedef CompositeOperation<MutexedWriteOperation, ReleaseOperation> FullOperation;
apetushkov@9858 608
apetushkov@9858 609 size_t JfrStorage::clear() {
apetushkov@9858 610 const size_t full_size_processed = clear_full();
apetushkov@9858 611 DiscardOperation discarder(concurrent); // concurrent discard mode
apetushkov@9858 612 process_full_list(discarder, _thread_local_mspace);
apetushkov@9858 613 assert(_transient_mspace->is_free_empty(), "invariant");
apetushkov@9858 614 process_full_list(discarder, _transient_mspace);
apetushkov@9858 615 assert(_global_mspace->is_full_empty(), "invariant");
apetushkov@9858 616 process_free_list(discarder, _global_mspace);
apetushkov@9858 617 return full_size_processed + discarder.processed();
apetushkov@9858 618 }
apetushkov@9858 619
apetushkov@9858 620 static void insert_free_age_nodes(JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, JfrAgeNode* tail, size_t count) {
apetushkov@9858 621 if (tail != NULL) {
apetushkov@9858 622 assert(tail->next() == NULL, "invariant");
apetushkov@9858 623 assert(head != NULL, "invariant");
apetushkov@9858 624 assert(head->prev() == NULL, "invariant");
apetushkov@9858 625 MutexLockerEx buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
apetushkov@9858 626 age_mspace->insert_free_tail(head, tail, count);
apetushkov@9858 627 }
apetushkov@9858 628 }
apetushkov@9858 629
apetushkov@9858 630 template <typename Processor>
apetushkov@9858 631 static void process_age_list(Processor& processor, JfrStorageAgeMspace* age_mspace, JfrAgeNode* head, size_t count) {
apetushkov@9858 632 assert(age_mspace != NULL, "invariant");
apetushkov@9858 633 assert(head != NULL, "invariant");
apetushkov@9858 634 assert(count > 0, "invariant");
apetushkov@9858 635 JfrAgeNode* node = head;
apetushkov@9858 636 JfrAgeNode* last = NULL;
apetushkov@9858 637 while (node != NULL) {
apetushkov@9858 638 last = node;
apetushkov@9858 639 BufferPtr const buffer = node->retired_buffer();
apetushkov@9858 640 assert(buffer != NULL, "invariant");
apetushkov@9858 641 assert(buffer->retired(), "invariant");
apetushkov@9858 642 processor.process(buffer);
apetushkov@9858 643 // at this point, buffer is already live or destroyed
apetushkov@9858 644 node->clear_identity();
apetushkov@9858 645 JfrAgeNode* const next = (JfrAgeNode*)node->next();
apetushkov@9858 646 if (node->transient()) {
apetushkov@9858 647 // detach
apetushkov@9858 648 last = (JfrAgeNode*)last->prev();
apetushkov@9858 649 if (last != NULL) {
apetushkov@9858 650 last->set_next(next);
apetushkov@9858 651 } else {
apetushkov@9858 652 head = next;
apetushkov@9858 653 }
apetushkov@9858 654 if (next != NULL) {
apetushkov@9858 655 next->set_prev(last);
apetushkov@9858 656 }
apetushkov@9858 657 --count;
apetushkov@9858 658 age_mspace->deallocate(node);
apetushkov@9858 659 }
apetushkov@9858 660 node = next;
apetushkov@9858 661 }
apetushkov@9858 662 insert_free_age_nodes(age_mspace, head, last, count);
apetushkov@9858 663 }
apetushkov@9858 664
apetushkov@9858 665 template <typename Processor>
apetushkov@9858 666 static size_t process_full(Processor& processor, JfrStorageControl& control, JfrStorageAgeMspace* age_mspace) {
apetushkov@9858 667 assert(age_mspace != NULL, "invariant");
apetushkov@9858 668 if (age_mspace->is_full_empty()) {
apetushkov@9858 669 // nothing to do
apetushkov@9858 670 return 0;
apetushkov@9858 671 }
apetushkov@9858 672 size_t count;
apetushkov@9858 673 JfrAgeNode* head;
apetushkov@9858 674 {
apetushkov@9858 675 // fetch age list
apetushkov@9858 676 MutexLockerEx buffer_lock(JfrBuffer_lock, Mutex::_no_safepoint_check_flag);
apetushkov@9858 677 count = age_mspace->full_count();
apetushkov@9858 678 head = age_mspace->clear_full();
apetushkov@9858 679 control.reset_full();
apetushkov@9858 680 }
apetushkov@9858 681 assert(head != NULL, "invariant");
apetushkov@9858 682 assert(count > 0, "invariant");
apetushkov@9858 683 process_age_list(processor, age_mspace, head, count);
apetushkov@9858 684 return count;
apetushkov@9858 685 }
apetushkov@9858 686
apetushkov@9858 687 static void log(size_t count, size_t amount, bool clear = false) {
apetushkov@9858 688 if (count > 0) {
apetushkov@9858 689 if (LogJFR) tty->print_cr("%s " SIZE_FORMAT " full buffer(s) of " SIZE_FORMAT" B of data%s",
apetushkov@9858 690 clear ? "Discarded" : "Wrote", count, amount, clear ? "." : " to chunk.");
apetushkov@9858 691 }
apetushkov@9858 692 }
apetushkov@9858 693
apetushkov@9858 694 // full writer
apetushkov@9858 695 // Assumption is retired only; exclusive access
apetushkov@9858 696 // MutexedWriter -> ReleaseOp
apetushkov@9858 697 //
apetushkov@9858 698 size_t JfrStorage::write_full() {
apetushkov@9858 699 assert(_chunkwriter.is_valid(), "invariant");
apetushkov@9858 700 Thread* const thread = Thread::current();
apetushkov@9858 701 WriteOperation wo(_chunkwriter);
apetushkov@9858 702 MutexedWriteOperation writer(wo); // a retired buffer implies mutexed access
apetushkov@9858 703 ReleaseOperation ro(_transient_mspace, thread);
apetushkov@9858 704 FullOperation cmd(&writer, &ro);
apetushkov@9858 705 const size_t count = process_full(cmd, control(), _age_mspace);
apetushkov@9858 706 log(count, writer.processed());
apetushkov@9858 707 return writer.processed();
apetushkov@9858 708 }
apetushkov@9858 709
apetushkov@9858 710 size_t JfrStorage::clear_full() {
apetushkov@9858 711 DiscardOperation discarder(mutexed); // a retired buffer implies mutexed access
apetushkov@9858 712 const size_t count = process_full(discarder, control(), _age_mspace);
apetushkov@9858 713 log(count, discarder.processed(), true);
apetushkov@9858 714 return discarder.processed();
apetushkov@9858 715 }
apetushkov@9858 716
apetushkov@9858 717 static void scavenge_log(size_t count, size_t amount, size_t current) {
apetushkov@9858 718 if (count > 0) {
apetushkov@9858 719 if (LogJFR) tty->print_cr("Released " SIZE_FORMAT " dead buffer(s) of " SIZE_FORMAT" B of data.", count, amount);
apetushkov@9858 720 if (LogJFR) tty->print_cr("Current number of dead buffers " SIZE_FORMAT "", current);
apetushkov@9858 721 }
apetushkov@9858 722 }
apetushkov@9858 723
apetushkov@9858 724 template <typename Mspace>
apetushkov@9858 725 class Scavenger {
apetushkov@9858 726 private:
apetushkov@9858 727 JfrStorageControl& _control;
apetushkov@9858 728 Mspace* _mspace;
apetushkov@9858 729 size_t _count;
apetushkov@9858 730 size_t _amount;
apetushkov@9858 731 public:
apetushkov@9858 732 typedef typename Mspace::Type Type;
apetushkov@9858 733 Scavenger(JfrStorageControl& control, Mspace* mspace) : _control(control), _mspace(mspace), _count(0), _amount(0) {}
apetushkov@9858 734 bool process(Type* t) {
apetushkov@9858 735 if (t->retired()) {
apetushkov@9928 736 assert(t->identity() != NULL, "invariant");
apetushkov@9928 737 assert(t->empty(), "invariant");
apetushkov@9858 738 assert(!t->transient(), "invariant");
apetushkov@9858 739 assert(!t->lease(), "invariant");
apetushkov@9858 740 ++_count;
apetushkov@9858 741 _amount += t->total_size();
apetushkov@9858 742 t->clear_retired();
apetushkov@9928 743 t->release();
apetushkov@9858 744 _control.decrement_dead();
apetushkov@9858 745 mspace_release_full_critical(t, _mspace);
apetushkov@9858 746 }
apetushkov@9858 747 return true;
apetushkov@9858 748 }
apetushkov@9858 749 size_t processed() const { return _count; }
apetushkov@9858 750 size_t amount() const { return _amount; }
apetushkov@9858 751 };
apetushkov@9858 752
apetushkov@9858 753 size_t JfrStorage::scavenge() {
apetushkov@9858 754 JfrStorageControl& ctrl = control();
apetushkov@9858 755 if (ctrl.dead_count() == 0) {
apetushkov@9858 756 return 0;
apetushkov@9858 757 }
apetushkov@9858 758 Scavenger<JfrThreadLocalMspace> scavenger(ctrl, _thread_local_mspace);
apetushkov@9858 759 process_full_list(scavenger, _thread_local_mspace);
apetushkov@9858 760 scavenge_log(scavenger.processed(), scavenger.amount(), ctrl.dead_count());
apetushkov@9858 761 return scavenger.processed();
apetushkov@9858 762 }

mercurial