832 assert(SharedHeap::heap()->workers()->active_workers() > 0, |
832 assert(SharedHeap::heap()->workers()->active_workers() > 0, |
833 "Should only fail when parallel."); |
833 "Should only fail when parallel."); |
834 return false; |
834 return false; |
835 } |
835 } |
836 |
836 |
837 void Thread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { |
837 void Thread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { |
838 active_handles()->oops_do(f); |
838 active_handles()->oops_do(f); |
839 // Do oop for ThreadShadow |
839 // Do oop for ThreadShadow |
840 f->do_oop((oop*)&_pending_exception); |
840 f->do_oop((oop*)&_pending_exception); |
841 handle_area()->oops_do(f); |
841 handle_area()->oops_do(f); |
842 } |
842 } |
2728 _cur_thr->set_processed_thread(NULL); |
2728 _cur_thr->set_processed_thread(NULL); |
2729 } |
2729 } |
2730 } |
2730 } |
2731 }; |
2731 }; |
2732 |
2732 |
2733 void JavaThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { |
2733 void JavaThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { |
2734 // Verify that the deferred card marks have been flushed. |
2734 // Verify that the deferred card marks have been flushed. |
2735 assert(deferred_card_mark().is_empty(), "Should be empty during GC"); |
2735 assert(deferred_card_mark().is_empty(), "Should be empty during GC"); |
2736 |
2736 |
2737 // The ThreadProfiler oops_do is done from FlatProfiler::oops_do |
2737 // The ThreadProfiler oops_do is done from FlatProfiler::oops_do |
2738 // since there may be more than one thread using each ThreadProfiler. |
2738 // since there may be more than one thread using each ThreadProfiler. |
3251 #ifndef PRODUCT |
3251 #ifndef PRODUCT |
3252 _ideal_graph_printer = NULL; |
3252 _ideal_graph_printer = NULL; |
3253 #endif |
3253 #endif |
3254 } |
3254 } |
3255 |
3255 |
3256 void CompilerThread::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { |
3256 void CompilerThread::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { |
3257 JavaThread::oops_do(f, cld_f, cf); |
3257 JavaThread::oops_do(f, cld_f, cf); |
3258 if (_scanned_nmethod != NULL && cf != NULL) { |
3258 if (_scanned_nmethod != NULL && cf != NULL) { |
3259 // Safepoints can occur when the sweeper is scanning an nmethod so |
3259 // Safepoints can occur when the sweeper is scanning an nmethod so |
3260 // process it here to make sure it isn't unloaded in the middle of |
3260 // process it here to make sure it isn't unloaded in the middle of |
3261 // a scan. |
3261 // a scan. |
4165 // In particular, these things should never be called when the Threads_lock |
4165 // In particular, these things should never be called when the Threads_lock |
4166 // is held by some other thread. (Note: the Safepoint abstraction also |
4166 // is held by some other thread. (Note: the Safepoint abstraction also |
4167 // uses the Threads_lock to guarantee this property. It also makes sure that |
4167 // uses the Threads_lock to guarantee this property. It also makes sure that |
4168 // all threads get blocked when exiting or starting). |
4168 // all threads get blocked when exiting or starting). |
4169 |
4169 |
4170 void Threads::oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { |
4170 void Threads::oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { |
4171 ALL_JAVA_THREADS(p) { |
4171 ALL_JAVA_THREADS(p) { |
4172 p->oops_do(f, cld_f, cf); |
4172 p->oops_do(f, cld_f, cf); |
4173 } |
4173 } |
4174 VMThread::vm_thread()->oops_do(f, cld_f, cf); |
4174 VMThread::vm_thread()->oops_do(f, cld_f, cf); |
4175 } |
4175 } |
4176 |
4176 |
4177 void Threads::possibly_parallel_oops_do(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf) { |
4177 void Threads::possibly_parallel_oops_do(OopClosure* f, CLDClosure* cld_f, CodeBlobClosure* cf) { |
4178 // Introduce a mechanism allowing parallel threads to claim threads as |
4178 // Introduce a mechanism allowing parallel threads to claim threads as |
4179 // root groups. Overhead should be small enough to use all the time, |
4179 // root groups. Overhead should be small enough to use all the time, |
4180 // even in sequential code. |
4180 // even in sequential code. |
4181 SharedHeap* sh = SharedHeap::heap(); |
4181 SharedHeap* sh = SharedHeap::heap(); |
4182 // Cannot yet substitute active_workers for n_par_threads |
4182 // Cannot yet substitute active_workers for n_par_threads |