/*
 * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/runtimeService.hpp"
#include "trace/tracing.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/xmlstream.hpp"

#ifndef USDT2
HS_DTRACE_PROBE_DECL3(hotspot, vmops__request, char *, uintptr_t, int);
HS_DTRACE_PROBE_DECL3(hotspot, vmops__begin, char *, uintptr_t, int);
HS_DTRACE_PROBE_DECL3(hotspot, vmops__end, char *, uintptr_t, int);
#endif /* !USDT2 */

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// Dummy VM operation to act as the first element in our circular doubly-linked list
class VM_Dummy: public VM_Operation {
  VMOp_Type type() const { return VMOp_Dummy; }
  void doit() {};
};

VMOperationQueue::VMOperationQueue() {
  // The queue is a circular doubly-linked list, which always contains
  // one element (i.e., one element means empty).
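  // Each priority level gets its own VM_Dummy sentinel whose next/prev
  // pointers refer back to itself.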
  for(int i = 0; i < nof_priorities; i++) {
    _queue_length[i] = 0;
    _queue_counter = 0;
    _queue[i] = new VM_Dummy();
    _queue[i]->set_next(_queue[i]);
    _queue[i]->set_prev(_queue[i]);
  }
  _drain_list = NULL;
}


bool VMOperationQueue::queue_empty(int prio) {
  // It is empty if there is exactly one element
  bool empty = (_queue[prio] == _queue[prio]->next());
  assert((_queue_length[prio] == 0 && empty) ||
         (_queue_length[prio] > 0  && !empty), "sanity check");
  return _queue_length[prio] == 0;
}

// Inserts an element to the right of the q element
void VMOperationQueue::insert(VM_Operation* q, VM_Operation* n) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  n->set_prev(q);
  n->set_next(q->next());
  q->next()->set_prev(n);
  q->set_next(n);
}

void VMOperationQueue::queue_add_front(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->next(), op);
}

void VMOperationQueue::queue_add_back(int prio, VM_Operation *op) {
  _queue_length[prio]++;
  insert(_queue[prio]->prev(), op);
}


void VMOperationQueue::unlink(VM_Operation* q) {
  assert(q->next()->prev() == q && q->prev()->next() == q, "sanity check");
  q->prev()->set_next(q->next());
  q->next()->set_prev(q->prev());
}

VM_Operation* VMOperationQueue::queue_remove_front(int prio) {
  if (queue_empty(prio)) return NULL;
  assert(_queue_length[prio] >= 0, "sanity check");
  _queue_length[prio]--;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  unlink(r);
  return r;
}

VM_Operation* VMOperationQueue::queue_drain(int prio) {
  if (queue_empty(prio)) return NULL;
  DEBUG_ONLY(int length = _queue_length[prio];);
  assert(length >= 0, "sanity check");
  _queue_length[prio] = 0;
  VM_Operation* r = _queue[prio]->next();
  assert(r != _queue[prio], "cannot remove base element");
  // remove links to base element from head and tail
  r->set_prev(NULL);
  _queue[prio]->prev()->set_next(NULL);
  // restore queue to empty state
  _queue[prio]->set_next(_queue[prio]);
  _queue[prio]->set_prev(_queue[prio]);
  assert(queue_empty(prio), "drain corrupted queue");
#ifdef ASSERT
  int len = 0;
  VM_Operation* cur;
  for(cur = r; cur != NULL; cur = cur->next()) len++;
  assert(len == length, "drain lost some ops");
#endif
  return r;
}

void VMOperationQueue::queue_oops_do(int queue, OopClosure* f) {
  VM_Operation* cur = _queue[queue];
  cur = cur->next();
  while (cur != _queue[queue]) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

void VMOperationQueue::drain_list_oops_do(OopClosure* f) {
  VM_Operation* cur = _drain_list;
  while (cur != NULL) {
    cur->oops_do(f);
    cur = cur->next();
  }
}

//-----------------------------------------------------------------
// High-level interface
bool VMOperationQueue::add(VM_Operation *op) {

#ifndef USDT2
  HS_DTRACE_PROBE3(hotspot,
                   vmops__request, op->name(), strlen(op->name()),
                   op->evaluation_mode());
#else /* USDT2 */
  HOTSPOT_VMOPS_REQUEST(
                        (char *) op->name(), strlen(op->name()),
                        op->evaluation_mode());
#endif /* USDT2 */

  // Encapsulates VM queue policy. Currently, that
  // only involves putting them on the right list
  if (op->evaluate_at_safepoint()) {
    queue_add_back(SafepointPriority, op);
    return true;
  }

  queue_add_back(MediumPriority, op);
  return true;
}

VM_Operation* VMOperationQueue::remove_next() {
  // Assuming the VMOperation queue is a two-level priority queue. If there are
  // more than two priorities, we need a different scheduling algorithm.
  assert(SafepointPriority == 0 && MediumPriority == 1 && nof_priorities == 2,
         "current algorithm does not work");

  // simple counter based scheduling to prevent starvation of the lower priority
  // queue. -- see 4390175
  int high_prio, low_prio;
  if (_queue_counter++ < 10) {
    high_prio = SafepointPriority;
    low_prio  = MediumPriority;
  } else {
    _queue_counter = 0;
    high_prio = MediumPriority;
    low_prio  = SafepointPriority;
  }

  return queue_remove_front(queue_empty(high_prio) ? low_prio : high_prio);
}

void VMOperationQueue::oops_do(OopClosure* f) {
  for(int i = 0; i < nof_priorities; i++) {
    queue_oops_do(i, f);
  }
  drain_list_oops_do(f);
}


//------------------------------------------------------------------------------------------------------------------
// Implementation of VMThread stuff

bool              VMThread::_should_terminate = false;
bool              VMThread::_terminated       = false;
Monitor*          VMThread::_terminate_lock   = NULL;
VMThread*         VMThread::_vm_thread        = NULL;
VM_Operation*     VMThread::_cur_vm_operation = NULL;
VMOperationQueue* VMThread::_vm_queue         = NULL;
PerfCounter*      VMThread::_perf_accumulated_vm_operation_time = NULL;


void VMThread::create() {
  assert(vm_thread() == NULL, "we can only allocate one VMThread");
  _vm_thread = new VMThread();

  // Create VM operation queue
  _vm_queue = new VMOperationQueue();
  guarantee(_vm_queue != NULL, "just checking");

  _terminate_lock = new Monitor(Mutex::safepoint, "VMThread::_terminate_lock", true);

  if (UsePerfData) {
    // jvmstat performance counters
    Thread* THREAD = Thread::current();
    _perf_accumulated_vm_operation_time =
        PerfDataManager::create_counter(SUN_THREADS, "vmOperationTime",
                                        PerfData::U_Ticks, CHECK);
  }
}


VMThread::VMThread() : NamedThread() {
  set_name("VM Thread");
}

void VMThread::destroy() {
  if (_vm_thread != NULL) {
    delete _vm_thread;
    _vm_thread = NULL;      // VM thread is gone
  }
}

void VMThread::run() {
  assert(this == vm_thread(), "check");

  this->initialize_thread_local_storage();
  this->record_stack_base_and_size();
  // The Notify_lock wait rechecks active_handles() in case of a spurious
  // wakeup; it should wait on the last value set prior to the notify.
  this->set_active_handles(JNIHandleBlock::allocate_block());

  {
    MutexLocker ml(Notify_lock);
    Notify_lock->notify();
  }
  // Notify_lock is destroyed by Threads::create_vm()

  int prio = (VMThreadPriority == -1)
    ? os::java_to_os_priority[NearMaxPriority]
    : VMThreadPriority;
  // Note that I cannot call os::set_priority because it expects Java
  // priorities and I am *explicitly* using OS priorities so that it's
  // possible to set the VM thread priority higher than any Java thread.
  os::set_native_priority(this, prio);

  // Wait for VM_Operations until termination
  this->loop();

  // Note the intention to exit before safepointing.
  // 6295565  This has the effect of waiting for any large tty
  // outputs to finish.
  if (xtty != NULL) {
    ttyLocker ttyl;
    xtty->begin_elem("destroy_vm");
    xtty->stamp();
    xtty->end_elem();
    assert(should_terminate(), "termination flag must be set");
  }

  // 4526887 let VM thread exit at Safepoint
  SafepointSynchronize::begin();

  if (VerifyBeforeExit) {
    HandleMark hm(VMThread::vm_thread());
    // Among other things, this ensures that Eden top is correct.
    Universe::heap()->prepare_for_verify();
    os::check_heap();
    // Silent verification so as not to pollute normal output,
    // unless we really asked for it.
    Universe::verify(!(PrintGCDetails || Verbose) || VerifySilently);
  }

  CompileBroker::set_should_block();

  // wait for threads (compiler threads or daemon threads) in the
  // _thread_in_native state to block.
  VM_Exit::wait_for_threads_in_native_to_block();

  // signal other threads that VM process is gone
  {
    // Note: we must have the _no_safepoint_check_flag. Mutex::lock() allows
    // the VM thread to enter any lock at a Safepoint as long as its _owner is NULL.
    // If that happens after _terminate_lock->wait() has unset _owner
    // but before it actually drops the lock and waits, the notification below
    // may get lost and we will have a hang. To avoid this, we need to use
    // Mutex::lock_without_safepoint_check().
    MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    _terminated = true;
    _terminate_lock->notify();
  }

  // Thread destructor usually does this.
  ThreadLocalStorage::set_thread(NULL);

  // Deletion must be done synchronously by the JNI DestroyJavaVM thread
  // so that the VMThread deletion completes before the main thread frees
  // up the CodeHeap.
}


// Notify the VMThread that the last non-daemon JavaThread has terminated,
// and wait until the operation is performed.
void VMThread::wait_for_vm_thread_exit() {
  { MutexLocker mu(VMOperationQueue_lock);
    _should_terminate = true;
    VMOperationQueue_lock->notify();
  }

  // Note: the VM thread leaves at a Safepoint. We are not stopped by the Safepoint
  // because this thread has been removed from the threads list. But anything
  // that could get blocked by the Safepoint should not be used after this point,
  // otherwise we will hang, since there is no one who can end the safepoint.

  // Wait until the VM thread is terminated
  // Note: it should be OK to use Terminator_lock here. But this is called
  // at a very delicate time (VM shutdown) and we are operating in a non-VM
  // thread at a Safepoint. It's safer not to share a lock with other threads.
  { MutexLockerEx ml(_terminate_lock, Mutex::_no_safepoint_check_flag);
    while(!VMThread::is_terminated()) {
      _terminate_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
}

void VMThread::print_on(outputStream* st) const {
  st->print("\"%s\" ", name());
  Thread::print_on(st);
  st->cr();
}

void VMThread::evaluate_operation(VM_Operation* op) {
  ResourceMark rm;

  {
    PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
#ifndef USDT2
    HS_DTRACE_PROBE3(hotspot, vmops__begin, op->name(), strlen(op->name()),
                     op->evaluation_mode());
#else /* USDT2 */
    HOTSPOT_VMOPS_BEGIN(
                        (char *) op->name(), strlen(op->name()),
                        op->evaluation_mode());
#endif /* USDT2 */

    EventExecuteVMOperation event;

    op->evaluate();

    if (event.should_commit()) {
      bool is_concurrent = op->evaluate_concurrently();
      event.set_operation(op->type());
      event.set_safepoint(op->evaluate_at_safepoint());
      event.set_blocking(!is_concurrent);
      // Only write caller thread information for non-concurrent vm operations.
      // For concurrent vm operations, the thread id is set to 0, indicating that
      // the thread is unknown, because the caller thread could have exited already.
      event.set_caller(is_concurrent ? 0 : op->calling_thread()->osthread()->thread_id());
      event.commit();
    }

#ifndef USDT2
    HS_DTRACE_PROBE3(hotspot, vmops__end, op->name(), strlen(op->name()),
                     op->evaluation_mode());
#else /* USDT2 */
    HOTSPOT_VMOPS_END(
                      (char *) op->name(), strlen(op->name()),
                      op->evaluation_mode());
#endif /* USDT2 */
  }

  // Last access of info in _cur_vm_operation!
  bool c_heap_allocated = op->is_cheap_allocated();

  // Mark as completed
  if (!op->evaluate_concurrently()) {
    op->calling_thread()->increment_vm_operation_completed_count();
  }
  // It is unsafe to access _cur_vm_operation after the
  // 'increment_vm_operation_completed_count' call, since if it is stack
  // allocated the calling thread might already have deallocated it.
  if (c_heap_allocated) {
    delete _cur_vm_operation;
  }
}


void VMThread::loop() {
  assert(_cur_vm_operation == NULL, "no current one should be executing");

  while(true) {
    VM_Operation* safepoint_ops = NULL;
    //
    // Wait for VM operation
    //
    // use no_safepoint_check to get lock without attempting to "sneak"
    { MutexLockerEx mu_queue(VMOperationQueue_lock,
                             Mutex::_no_safepoint_check_flag);

      // Look for new operation
      assert(_cur_vm_operation == NULL, "no current one should be executing");
      _cur_vm_operation = _vm_queue->remove_next();

      // Stall time tracking code
      if (PrintVMQWaitTime && _cur_vm_operation != NULL &&
          !_cur_vm_operation->evaluate_concurrently()) {
        long stall = os::javaTimeMillis() - _cur_vm_operation->timestamp();
        if (stall > 0)
          tty->print_cr("%s stall: %Ld", _cur_vm_operation->name(), stall);
      }

      while (!should_terminate() && _cur_vm_operation == NULL) {
        // wait with a timeout to guarantee safepoints at regular intervals
        bool timedout =
          VMOperationQueue_lock->wait(Mutex::_no_safepoint_check_flag,
                                      GuaranteedSafepointInterval);

        // Support for self destruction
        if ((SelfDestructTimer != 0) && !is_error_reported() &&
            (os::elapsedTime() > SelfDestructTimer * 60)) {
          tty->print_cr("VM self-destructed");
          exit(-1);
        }

        if (timedout && (SafepointALot ||
                         SafepointSynchronize::is_cleanup_needed())) {
          MutexUnlockerEx mul(VMOperationQueue_lock,
                              Mutex::_no_safepoint_check_flag);
          // Force a safepoint since we have not had one for at least
          // 'GuaranteedSafepointInterval' milliseconds. This will run all
          // the clean-up processing that needs to be done regularly at a
          // safepoint
          SafepointSynchronize::begin();
#ifdef ASSERT
          if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
#endif
          SafepointSynchronize::end();
        }
        _cur_vm_operation = _vm_queue->remove_next();

        // If we are at a safepoint we will evaluate all the operations that
        // follow that also require a safepoint
        if (_cur_vm_operation != NULL &&
            _cur_vm_operation->evaluate_at_safepoint()) {
          safepoint_ops = _vm_queue->drain_at_safepoint_priority();
        }
      }

      if (should_terminate()) break;
    } // Release mu_queue_lock

    //
    // Execute VM operation
    //
    { HandleMark hm(VMThread::vm_thread());

      EventMark em("Executing VM operation: %s", vm_operation()->name());
      assert(_cur_vm_operation != NULL, "we should have found an operation to execute");

      // Give the VM thread an extra quantum.  Jobs tend to be bursty and this
      // helps the VM thread to finish up the job.
      // FIXME: When this is enabled and there are many threads, this can degrade
      // performance significantly.
      if( VMThreadHintNoPreempt )
        os::hint_no_preempt();

      // If we are at a safepoint we will evaluate all the operations that
      // follow that also require a safepoint
      if (_cur_vm_operation->evaluate_at_safepoint()) {

        _vm_queue->set_drain_list(safepoint_ops); // ensure ops can be scanned

        SafepointSynchronize::begin();
        evaluate_operation(_cur_vm_operation);
        // now process all queued safepoint ops, iteratively draining
        // the queue until there are none left
        do {
          _cur_vm_operation = safepoint_ops;
          if (_cur_vm_operation != NULL) {
            do {
              // evaluate_operation deletes the op object so we have
              // to grab the next op now
              VM_Operation* next = _cur_vm_operation->next();
              _vm_queue->set_drain_list(next);
              evaluate_operation(_cur_vm_operation);
              _cur_vm_operation = next;
              if (PrintSafepointStatistics) {
                SafepointSynchronize::inc_vmop_coalesced_count();
              }
            } while (_cur_vm_operation != NULL);
          }
          // There is a chance that a thread enqueued a safepoint op
          // since we released the op-queue lock and initiated the safepoint.
          // So we drain the queue again if there is anything there, as an
          // optimization to try to reduce the number of safepoints.
          // As the safepoint synchronizes us with JavaThreads we will see
          // any enqueue made by a JavaThread, but the peek will not
          // necessarily detect a concurrent enqueue by a GC thread; that
          // simply means the op will wait for the next major cycle of the
          // VMThread - just as it would if the GC thread lost the race for
          // the lock.
          if (_vm_queue->peek_at_safepoint_priority()) {
            // must hold lock while draining queue
            MutexLockerEx mu_queue(VMOperationQueue_lock,
                                   Mutex::_no_safepoint_check_flag);
            safepoint_ops = _vm_queue->drain_at_safepoint_priority();
          } else {
            safepoint_ops = NULL;
          }
        } while(safepoint_ops != NULL);

        _vm_queue->set_drain_list(NULL);

        // Complete safepoint synchronization
        SafepointSynchronize::end();

      } else {  // not a safepoint operation
        if (TraceLongCompiles) {
          elapsedTimer t;
          t.start();
          evaluate_operation(_cur_vm_operation);
          t.stop();
          double secs = t.seconds();
          if (secs * 1e3 > LongCompileThreshold) {
            // XXX - _cur_vm_operation should not be accessed after
            // the completed count has been incremented; the waiting
            // thread may have already freed this memory.
            tty->print_cr("vm %s: %3.7f secs]", _cur_vm_operation->name(), secs);
          }
        } else {
          evaluate_operation(_cur_vm_operation);
        }

        _cur_vm_operation = NULL;
      }
    }

    //
    // Notify (potential) waiting Java thread(s) - lock without safepoint
    // check so that sneaking is not possible
    { MutexLockerEx mu(VMOperationRequest_lock,
                       Mutex::_no_safepoint_check_flag);
      VMOperationRequest_lock->notify_all();
    }

    //
    // We want to make sure that we get to a safepoint regularly.
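    // A safepoint is forced below if cleanup is needed or the
    // GuaranteedSafepointInterval has elapsed since the last safepoint.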
    //
    if (SafepointALot || SafepointSynchronize::is_cleanup_needed()) {
      long interval = SafepointSynchronize::last_non_safepoint_interval();
      bool max_time_exceeded = GuaranteedSafepointInterval != 0 &&
                               (interval > GuaranteedSafepointInterval);
      if (SafepointALot || max_time_exceeded) {
        HandleMark hm(VMThread::vm_thread());
        SafepointSynchronize::begin();
        SafepointSynchronize::end();
      }
    }
  }
}

void VMThread::execute(VM_Operation* op) {
  Thread* t = Thread::current();

  if (!t->is_VM_thread()) {
    SkipGCALot sgcalot(t);    // avoid re-entrant attempts to gc-a-lot
    // JavaThread or WatcherThread
    bool concurrent = op->evaluate_concurrently();
    // only blocking VM operations need to verify the caller's safepoint state:
    if (!concurrent) {
      t->check_for_valid_safepoint_state(true);
    }

    // New request from Java thread, evaluate prologue
    if (!op->doit_prologue()) {
      return;   // op was cancelled
    }

    // Setup VM_operations for execution
    op->set_calling_thread(t, Thread::get_priority(t));

    // It does not make sense to execute the epilogue if the VM operation object is getting
    // deallocated by the VM thread.
    bool execute_epilog = !op->is_cheap_allocated();
    assert(!concurrent || op->is_cheap_allocated(), "concurrent => cheap_allocated");

    // Get ticket number for non-concurrent VM operations
    int ticket = 0;
    if (!concurrent) {
      ticket = t->vm_operation_ticket();
    }

    // Add VM operation to list of waiting threads. We are guaranteed not to block while holding the
    // VMOperationQueue_lock, so we can block without a safepoint check. This allows vm operation requests
    // to be queued up during a safepoint synchronization.
    {
      VMOperationQueue_lock->lock_without_safepoint_check();
      bool ok = _vm_queue->add(op);
      op->set_timestamp(os::javaTimeMillis());
      VMOperationQueue_lock->notify();
      VMOperationQueue_lock->unlock();
      // VM_Operation got skipped
      if (!ok) {
        assert(concurrent, "can only skip concurrent tasks");
        if (op->is_cheap_allocated()) delete op;
        return;
      }
    }

    if (!concurrent) {
      // Wait for completion of request (non-concurrent)
      // Note: only a JavaThread triggers the safepoint check when locking
      MutexLocker mu(VMOperationRequest_lock);
      while(t->vm_operation_completed_count() < ticket) {
        VMOperationRequest_lock->wait(!t->is_Java_thread());
      }
    }

    if (execute_epilog) {
      op->doit_epilogue();
    }
  } else {
    // invoked by VM thread; usually nested VM operation
    assert(t->is_VM_thread(), "must be a VM thread");
    VM_Operation* prev_vm_operation = vm_operation();
    if (prev_vm_operation != NULL) {
      // Check that the VM operation allows nested VM operations. This is normally
      // not the case, e.g., the compiler does not allow nested scavenges or compiles.
      if (!prev_vm_operation->allow_nested_vm_operations()) {
        fatal(err_msg("Nested VM operation %s requested by operation %s",
                      op->name(), vm_operation()->name()));
      }
      op->set_calling_thread(prev_vm_operation->calling_thread(), prev_vm_operation->priority());
    }

    EventMark em("Executing %s VM operation: %s", prev_vm_operation ? "nested" : "", op->name());

    // Release all internal handles after the operation is evaluated
    HandleMark hm(t);
    _cur_vm_operation = op;

    if (op->evaluate_at_safepoint() && !SafepointSynchronize::is_at_safepoint()) {
      SafepointSynchronize::begin();
      op->evaluate();
      SafepointSynchronize::end();
    } else {
      op->evaluate();
    }

    // Free memory if needed
    if (op->is_cheap_allocated()) delete op;

    _cur_vm_operation = prev_vm_operation;
  }
}


void VMThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) {
  Thread::oops_do(f, cld_f, cf);
  _vm_queue->oops_do(f);
}

//------------------------------------------------------------------------------------------------------------------
#ifndef PRODUCT

void VMOperationQueue::verify_queue(int prio) {
  // Check that the list is correctly linked
  int length = _queue_length[prio];
  VM_Operation *cur = _queue[prio];
  int i;

  // Check forward links
  for(i = 0; i < length; i++) {
    cur = cur->next();
    assert(cur != _queue[prio], "list too short (forward)");
  }
  assert(cur->next() == _queue[prio], "list too long (forward)");

  // Check backwards links
  cur = _queue[prio];
  for(i = 0; i < length; i++) {
    cur = cur->prev();
    assert(cur != _queue[prio], "list too short (backwards)");
  }
  assert(cur->prev() == _queue[prio], "list too long (backwards)");
}

#endif

void VMThread::verify() {
  oops_do(&VerifyOopClosure::verify_oop, NULL, NULL);
}